From 4a963de27ffb88b719203423518d527332f23092 Mon Sep 17 00:00:00 2001 From: Sameer Magan Date: Thu, 11 Apr 2019 13:18:52 +0400 Subject: [PATCH 01/71] Support for scram sasl mechanisms --- README.md | 1 + kafka_exporter.go | 16 ++++++++++++++++ scram_client.go | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 scram_client.go diff --git a/README.md b/README.md index 7ead7cce..fe00c226 100644 --- a/README.md +++ b/README.md @@ -98,6 +98,7 @@ This image is configurable using different flags | sasl.handshake | true | Only set this to false if using a non-Kafka SASL proxy | | sasl.username | | SASL user name | | sasl.password | | SASL user password | +| sasl.mechanism | | SASL mechanism can be plain, sha512, sha256 | tls.enabled | false | Connect using TLS | | tls.ca-file | | The optional certificate authority file for TLS client authentication | | tls.cert-file | | The optional certificate file for client authentication | diff --git a/kafka_exporter.go b/kafka_exporter.go index 1b150b38..7bb34d34 100644 --- a/kafka_exporter.go +++ b/kafka_exporter.go @@ -66,6 +66,7 @@ type kafkaOpts struct { useSASLHandshake bool saslUsername string saslPassword string + saslMechanism string useTLS bool tlsCAFile string tlsCertFile string @@ -124,6 +125,20 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor config.Version = kafkaVersion if opts.useSASL { + // Convert to lowercase so that SHA512 and SHA256 is still valid + opts.saslMechanism = strings.ToLower(opts.saslMechanism) + switch opts.saslMechanism { + case "sha512": + config.Net.SASL.SCRAMClient = &XDGSCRAMClient{HashGeneratorFcn: SHA512} + config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) + case "sha256": + config.Net.SASL.SCRAMClient = &XDGSCRAMClient{HashGeneratorFcn: SHA256} + config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) + case "plain": + default: + plog.Fatalf("invalid sasl mechanism 
\"%s\": can only be \"sha256\", \"sha512\" or \"plain\"", opts.saslMechanism) + } + config.Net.SASL.Enable = true config.Net.SASL.Handshake = opts.useSASLHandshake @@ -481,6 +496,7 @@ func main() { kingpin.Flag("sasl.handshake", "Only set this to false if using a non-Kafka SASL proxy.").Default("true").BoolVar(&opts.useSASLHandshake) kingpin.Flag("sasl.username", "SASL user name.").Default("").StringVar(&opts.saslUsername) kingpin.Flag("sasl.password", "SASL user password.").Default("").StringVar(&opts.saslPassword) + kingpin.Flag("sasl.mechanism", "The SASL SCRAM SHA algorithm sha256 or sha512 as mechanism").Default("").StringVar(&opts.saslMechanism) kingpin.Flag("tls.enabled", "Connect using TLS.").Default("false").BoolVar(&opts.useTLS) kingpin.Flag("tls.ca-file", "The optional certificate authority file for TLS client authentication.").Default("").StringVar(&opts.tlsCAFile) kingpin.Flag("tls.cert-file", "The optional certificate file for client authentication.").Default("").StringVar(&opts.tlsCertFile) diff --git a/scram_client.go b/scram_client.go new file mode 100644 index 00000000..6f622817 --- /dev/null +++ b/scram_client.go @@ -0,0 +1,36 @@ +package main + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" + + "github.com/xdg/scram" +) + +var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } +var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } + +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return 
x.ClientConversation.Done() +} From 09cc043fae3812ba8b7b9c856ee93a40b7df2a60 Mon Sep 17 00:00:00 2001 From: Sameer Magan Date: Thu, 11 Apr 2019 13:20:51 +0400 Subject: [PATCH 02/71] updaed to use go modules --- go.mod | 31 +++++++++++++++++++++++ go.sum | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 go.mod create mode 100644 go.sum diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..da0b6512 --- /dev/null +++ b/go.mod @@ -0,0 +1,31 @@ +module github.com/danielqsj/kafka_exporter + +go 1.12 + +require ( + github.com/Shopify/sarama v1.22.0 + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/davecgh/go-spew v1.1.1 + github.com/eapache/go-resiliency v1.1.0 + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 + github.com/eapache/queue v1.1.0 + github.com/golang/protobuf v1.2.0 + github.com/golang/snappy v0.0.1 + github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 + github.com/prometheus/client_golang v0.8.0 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20181116084131-1f2c4f3cd6db + github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d + github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a + github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec + github.com/sirupsen/logrus v1.2.0 + github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 + golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e + gopkg.in/alecthomas/kingpin.v2 v2.2.6 +) diff --git a/go.sum b/go.sum new file mode 
100644 index 00000000..73a5066d --- /dev/null +++ b/go.sum @@ -0,0 +1,79 @@ +github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= +github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.22.0 h1:rtiODsvY4jW6nUV6n3K+0gx/8WlAwVt+Ixt6RIvpYyo= +github.com/Shopify/sarama v1.22.0/go.mod h1:lm3THZ8reqBDBQKQyb5HB3sY1lKp3grEbQ81aWSgPp4= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e h1:IWiVY66Xy9YrDZ28qJMt1UTlh6x9UGW0aDH/o58CSnA= +github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e/go.mod h1:Rq6003vCNoJNrT6ol0hMebQ3GWLWXSHrD/QcMlXt0EE= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742 h1:wKfigKMTgvSzBLIVvB5QaBBQI0odU6n45/UKSphjLus= +github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181116084131-1f2c4f3cd6db h1:ckMAAQJ96ZKwKyiGamJdsinLn3D9+daeRlvvmYo9tkI= +github.com/prometheus/common v0.0.0-20181116084131-1f2c4f3cd6db/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/xdg/scram 
v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8 h1:YoY1wS6JYVRpIfFngRf2HHo9R9dAne3xbkGOQ5rJXjU= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 2032654ca9aae68f3378679255a2b7b838427df9 Mon Sep 17 00:00:00 2001 From: Sameer Magan Date: Fri, 12 Apr 2019 21:38:07 +0400 Subject: [PATCH 03/71] changed mechanism type to have scram prefix --- README.md | 2 +- kafka_exporter.go | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index fe00c226..93841e17 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,7 @@ This image is configurable using different flags | sasl.handshake | true | Only set this to false if using a non-Kafka SASL proxy | | sasl.username | | SASL user name | | sasl.password | | SASL user password | -| sasl.mechanism | | SASL mechanism can be plain, sha512, sha256 +| sasl.mechanism | | SASL mechanism can be plain, scram-sha512, scram-sha256 | tls.enabled | false | Connect using TLS | | tls.ca-file | | The optional certificate authority file for TLS client authentication | | tls.cert-file | | The optional certificate file for client authentication | diff --git a/kafka_exporter.go b/kafka_exporter.go index 7bb34d34..2c01b677 100644 --- a/kafka_exporter.go +++ b/kafka_exporter.go @@ -128,15 +128,16 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor // Convert to lowercase so that SHA512 and SHA256 is still valid opts.saslMechanism = strings.ToLower(opts.saslMechanism) switch opts.saslMechanism { - case "sha512": + case "scram-sha512": config.Net.SASL.SCRAMClient = &XDGSCRAMClient{HashGeneratorFcn: SHA512} config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) - case "sha256": + case "scram-sha256": config.Net.SASL.SCRAMClient = &XDGSCRAMClient{HashGeneratorFcn: SHA256} config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) + case "plain": default: - 
plog.Fatalf("invalid sasl mechanism \"%s\": can only be \"sha256\", \"sha512\" or \"plain\"", opts.saslMechanism) + plog.Fatalf("invalid sasl mechanism \"%s\": can only be \"scram-sha256\", \"scram-sha512\" or \"plain\"", opts.saslMechanism) } config.Net.SASL.Enable = true From 211ab7595ec70c3ec2a2849c6e760ba19430a473 Mon Sep 17 00:00:00 2001 From: Sameer Magan Date: Sun, 14 Apr 2019 23:46:22 +0400 Subject: [PATCH 04/71] using latest commit on master for samara as it contains fix --- go.mod | 2 +- go.sum | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index da0b6512..6839deb8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/danielqsj/kafka_exporter go 1.12 require ( - github.com/Shopify/sarama v1.22.0 + github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 diff --git a/go.sum b/go.sum index 73a5066d..f6d27f5c 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,13 @@ github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.22.0 h1:rtiODsvY4jW6nUV6n3K+0gx/8WlAwVt+Ixt6RIvpYyo= github.com/Shopify/sarama v1.22.0/go.mod h1:lm3THZ8reqBDBQKQyb5HB3sY1lKp3grEbQ81aWSgPp4= +github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa h1:UMPk5t5qV+uZE+TtnTJTqvr0y97abIrqnQNFgDY/ymY= +github.com/Shopify/sarama 
v1.22.1-0.20190412022128-c14cccae56fa/go.mod h1:FRzlvRpMFO/639zY1SDxUxkqH97Y0ndM5CbGj6oG3As= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= From 467c68f880a5f5f5d79c3f91dfe982c7477c38e6 Mon Sep 17 00:00:00 2001 From: Sameer Magan Date: Mon, 27 May 2019 08:30:29 +0400 Subject: [PATCH 05/71] updated SCRAM to use generator function in the case of creating multiple connections --- go.mod | 2 +- go.sum | 2 ++ kafka_exporter.go | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6839deb8..3fbe4f53 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/danielqsj/kafka_exporter go 1.12 require ( - github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa + github.com/Shopify/sarama v1.22.1 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 diff --git a/go.sum b/go.sum index f6d27f5c..88da9464 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ github.com/Shopify/sarama v1.22.0 h1:rtiODsvY4jW6nUV6n3K+0gx/8WlAwVt+Ixt6RIvpYyo github.com/Shopify/sarama v1.22.0/go.mod h1:lm3THZ8reqBDBQKQyb5HB3sY1lKp3grEbQ81aWSgPp4= github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa h1:UMPk5t5qV+uZE+TtnTJTqvr0y97abIrqnQNFgDY/ymY= github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa/go.mod h1:FRzlvRpMFO/639zY1SDxUxkqH97Y0ndM5CbGj6oG3As= +github.com/Shopify/sarama v1.22.1 h1:exyEsKLGyCsDiqpV5Lr4slFi8ev2KiM3cP1KZ6vnCQ0= +github.com/Shopify/sarama v1.22.1/go.mod h1:FRzlvRpMFO/639zY1SDxUxkqH97Y0ndM5CbGj6oG3As= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod 
h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= diff --git a/kafka_exporter.go b/kafka_exporter.go index 2c01b677..d1d76413 100644 --- a/kafka_exporter.go +++ b/kafka_exporter.go @@ -129,10 +129,10 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor opts.saslMechanism = strings.ToLower(opts.saslMechanism) switch opts.saslMechanism { case "scram-sha512": - config.Net.SASL.SCRAMClient = &XDGSCRAMClient{HashGeneratorFcn: SHA512} + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) case "scram-sha256": - config.Net.SASL.SCRAMClient = &XDGSCRAMClient{HashGeneratorFcn: SHA256} + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) case "plain": From a402bdc8b9b055fd6340a9315a25793e6fe5bfdb Mon Sep 17 00:00:00 2001 From: Abhishek Jaisingh Date: Tue, 10 Dec 2019 23:27:24 +0530 Subject: [PATCH 06/71] Add Helm Chart Change-Id: I207384a31937f388e4311da9d917eece831733f1 --- charts/kafka-exporter/.helmignore | 22 +++++ charts/kafka-exporter/Chart.yaml | 9 ++ charts/kafka-exporter/templates/NOTES.txt | 5 ++ charts/kafka-exporter/templates/_helpers.tpl | 32 +++++++ .../kafka-exporter/templates/deployment.yaml | 85 +++++++++++++++++++ charts/kafka-exporter/templates/secret.yaml | 15 ++++ charts/kafka-exporter/templates/service.yaml | 19 +++++ .../templates/servicemonitor.yaml | 32 +++++++ charts/kafka-exporter/values.yaml | 67 +++++++++++++++ 9 files changed, 286 insertions(+) create mode 100644 charts/kafka-exporter/.helmignore 
create mode 100644 charts/kafka-exporter/Chart.yaml create mode 100644 charts/kafka-exporter/templates/NOTES.txt create mode 100644 charts/kafka-exporter/templates/_helpers.tpl create mode 100644 charts/kafka-exporter/templates/deployment.yaml create mode 100644 charts/kafka-exporter/templates/secret.yaml create mode 100644 charts/kafka-exporter/templates/service.yaml create mode 100644 charts/kafka-exporter/templates/servicemonitor.yaml create mode 100644 charts/kafka-exporter/values.yaml diff --git a/charts/kafka-exporter/.helmignore b/charts/kafka-exporter/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/charts/kafka-exporter/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/kafka-exporter/Chart.yaml b/charts/kafka-exporter/Chart.yaml new file mode 100644 index 00000000..9df77a26 --- /dev/null +++ b/charts/kafka-exporter/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-exporter +version: 1.0.0 +home: https://github.com/abhishekjiitr/kafka-exporter-helm +maintainers: + - name: abhishekjiitr + email: abhi2254015@gmail.com diff --git a/charts/kafka-exporter/templates/NOTES.txt b/charts/kafka-exporter/templates/NOTES.txt new file mode 100644 index 00000000..d9eb9e80 --- /dev/null +++ b/charts/kafka-exporter/templates/NOTES.txt @@ -0,0 +1,5 @@ +1.To see the metrics +{{- if contains "ClusterIP" .Values.service.type }} + kubectl port-forward svc/{{ include "kafka-exporter.fullname" . 
}} {{ .Values.service.port }} + echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application" +{{- end }} diff --git a/charts/kafka-exporter/templates/_helpers.tpl b/charts/kafka-exporter/templates/_helpers.tpl new file mode 100644 index 00000000..bc51bbfc --- /dev/null +++ b/charts/kafka-exporter/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kafka-exporter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kafka-exporter.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kafka-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/kafka-exporter/templates/deployment.yaml b/charts/kafka-exporter/templates/deployment.yaml new file mode 100644 index 00000000..69563bd2 --- /dev/null +++ b/charts/kafka-exporter/templates/deployment.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kafka-exporter.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + helm.sh/chart: {{ include "kafka-exporter.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + {{- if .Values.kafkaExporter}} + {{- range .Values.kafkaExporter.kafka.servers }} + - "--kafka.server={{ . }}" + {{- end }} + {{- if .Values.kafkaExporter.kafka.version }} + - --kafka.version={{ .Values.kafkaExporter.kafka.version }} + {{- end }} + {{- end}} + {{- if .Values.kafkaExporter.sasl.enabled }} + - --sasl.enabled + {{- if not .Values.kafkaExporter.sasl.handshake }} + - --sasl.handshake=false + {{- end }} + - --sasl.username={{ .Values.kafkaExporter.sasl.username }} + - --sasl.password={{ .Values.kafkaExporter.sasl.password }} + {{- end }} + {{- if .Values.kafkaExporter.tls.enabled}} + - --tls.enabled + - --tls.ca-file=/etc/tls-certs/ca-file + - --tls.cert-file=/etc/tls-certs/cert-file + - --tls.key-file=/etc/tls-certs/key-file + {{- end }} + {{- if .Values.kafkaExporter.log }} + - --log.level={{ .Values.kafkaExporter.log.level }} + {{- if .Values.kafkaExporter.log.enableSarama }} + - --log.enable-sarama + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9308 + protocol: TCP + {{- if .Values.kafkaExporter.tls.enabled }} + volumeMounts: + - name: tls-certs + mountPath: "/etc/tls-certs/" + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.kafkaExporter.tls.enabled }} + volumes: + - name: tls-certs + secret: + secretName: {{ include "kafka-exporter.fullname" . }} + {{- end }} diff --git a/charts/kafka-exporter/templates/secret.yaml b/charts/kafka-exporter/templates/secret.yaml new file mode 100644 index 00000000..82f567f3 --- /dev/null +++ b/charts/kafka-exporter/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kafkaExporter.tls.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "kafka-exporter.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + helm.sh/chart: {{ include "kafka-exporter.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + ca-file: {{ .Values.kafkaExporter.tls.caFile | b64enc }} + cert-file: {{ .Values.kafkaExporter.tls.certFile | b64enc }} + key-file: {{ .Values.kafkaExporter.tls.keyFile | b64enc }} +{{- end }} diff --git a/charts/kafka-exporter/templates/service.yaml b/charts/kafka-exporter/templates/service.yaml new file mode 100644 index 00000000..049041fb --- /dev/null +++ b/charts/kafka-exporter/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kafka-exporter.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + helm.sh/chart: {{ include "kafka-exporter.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: metrics + protocol: TCP + name: metrics + selector: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/kafka-exporter/templates/servicemonitor.yaml b/charts/kafka-exporter/templates/servicemonitor.yaml new file mode 100644 index 00000000..52f0bb40 --- /dev/null +++ b/charts/kafka-exporter/templates/servicemonitor.yaml @@ -0,0 +1,32 @@ +{{- if .Values.prometheus.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "kafka-exporter.fullname" . }} + {{- if .Values.prometheus.serviceMonitor.namespace }} + namespace: {{ .Values.prometheus.serviceMonitor.namespace }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + helm.sh/chart: {{ include "kafka-exporter.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- if .Values.prometheus.serviceMonitor.additionalLabels }} +{{ toYaml .Values.prometheus.serviceMonitor.additionalLabels | indent 4 -}} + {{- end }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} + helm.sh/chart: {{ include "kafka-exporter.chart" . }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: metrics + interval: {{ .Values.prometheus.serviceMonitor.interval }} + {{- if .Values.prometheus.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.serviceMonitor.scrapeTimeout }} + {{- end }} +{{- end }} diff --git a/charts/kafka-exporter/values.yaml b/charts/kafka-exporter/values.yaml new file mode 100644 index 00000000..c1892977 --- /dev/null +++ b/charts/kafka-exporter/values.yaml @@ -0,0 +1,67 @@ +# Default values for kafka-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: danielqsj/kafka-exporter + tag: latest + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" + +service: + type: ClusterIP + port: 9308 + +kafkaExporter: + kafka: + servers: [] + # - kafka:9092 + # version: "1.0.0" + + sasl: + enabled: false + handshake: true + username: "" + password: "" + + tls: + enabled: false + insecure-skip-tls-verify: false + caFile: "" + certFile: "" + keyFile: "" + + log: + level: info + enableSarama: false + +prometheus: + serviceMonitor: + enabled: true + namespace: monitoring + interval: "30s" + additionalLabels: + app: kafka-exporter + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} From 35e6a82fa38f09a7b34b3d69a07629438a11d662 Mon Sep 17 00:00:00 2001 From: Alexis Vanier Date: Wed, 17 Feb 2021 10:37:10 -0500 Subject: [PATCH 07/71] Add configurable labels to everything This allows adding configurable labels to every object provided by this deployment. 
--- charts/kafka-exporter/templates/deployment.yaml | 6 ++++++ charts/kafka-exporter/templates/secret.yaml | 3 +++ charts/kafka-exporter/templates/service.yaml | 3 +++ charts/kafka-exporter/templates/servicemonitor.yaml | 3 +++ charts/kafka-exporter/values.yaml | 2 ++ 5 files changed, 17 insertions(+) diff --git a/charts/kafka-exporter/templates/deployment.yaml b/charts/kafka-exporter/templates/deployment.yaml index 69563bd2..5923c606 100644 --- a/charts/kafka-exporter/templates/deployment.yaml +++ b/charts/kafka-exporter/templates/deployment.yaml @@ -7,6 +7,9 @@ metadata: helm.sh/chart: {{ include "kafka-exporter.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- if .Values.labels -}} + {{ .Values.labels | toYaml | nindent 4 -}} + {{- end }} spec: replicas: {{ .Values.replicaCount }} selector: @@ -18,6 +21,9 @@ spec: labels: app.kubernetes.io/name: {{ include "kafka-exporter.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.podLabels -}} + {{ .Values.podLabels | toYaml | nindent 8 -}} + {{- end }} spec: containers: - name: {{ .Chart.Name }} diff --git a/charts/kafka-exporter/templates/secret.yaml b/charts/kafka-exporter/templates/secret.yaml index 82f567f3..e2249e89 100644 --- a/charts/kafka-exporter/templates/secret.yaml +++ b/charts/kafka-exporter/templates/secret.yaml @@ -8,6 +8,9 @@ metadata: helm.sh/chart: {{ include "kafka-exporter.chart" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- if .Values.labels -}} + {{- .Values.labels | toYaml | nindent 4 }} + {{- end }} data: ca-file: {{ .Values.kafkaExporter.tls.caFile | b64enc }} cert-file: {{ .Values.kafkaExporter.tls.certFile | b64enc }} diff --git a/charts/kafka-exporter/templates/service.yaml b/charts/kafka-exporter/templates/service.yaml index 049041fb..61e02bd8 100644 --- a/charts/kafka-exporter/templates/service.yaml +++ b/charts/kafka-exporter/templates/service.yaml @@ -7,6 +7,9 @@ metadata: helm.sh/chart: {{ include "kafka-exporter.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- if .Values.labels -}} + {{ .Values.labels | toYaml | nindent 4 -}} + {{- end }} spec: type: {{ .Values.service.type }} ports: diff --git a/charts/kafka-exporter/templates/servicemonitor.yaml b/charts/kafka-exporter/templates/servicemonitor.yaml index 52f0bb40..5d4a3991 100644 --- a/charts/kafka-exporter/templates/servicemonitor.yaml +++ b/charts/kafka-exporter/templates/servicemonitor.yaml @@ -11,6 +11,9 @@ metadata: helm.sh/chart: {{ include "kafka-exporter.chart" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- if .Values.labels -}} + {{ .Values.labels | toYaml | nindent 4 -}} + {{- end }} {{- if .Values.prometheus.serviceMonitor.additionalLabels }} {{ toYaml .Values.prometheus.serviceMonitor.additionalLabels | indent 4 -}} {{- end }} diff --git a/charts/kafka-exporter/values.yaml b/charts/kafka-exporter/values.yaml index c1892977..3aee5fbb 100644 --- a/charts/kafka-exporter/values.yaml +++ b/charts/kafka-exporter/values.yaml @@ -47,6 +47,8 @@ prometheus: additionalLabels: app: kafka-exporter +labels: {} +podLabels: {} resources: {} # We usually recommend not to specify default resources and to leave this as a conscious From 4bb6bab7f6574f2650501eccaa076cda76cbdd9d Mon Sep 17 00:00:00 2001 From: Alexis Vanier Date: Wed, 17 Feb 2021 10:43:00 -0500 Subject: [PATCH 08/71] Bump chart version to 1.1.0 --- charts/kafka-exporter/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/kafka-exporter/Chart.yaml b/charts/kafka-exporter/Chart.yaml index 9df77a26..40d6efa4 100644 --- a/charts/kafka-exporter/Chart.yaml +++ b/charts/kafka-exporter/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: "1.0" description: A Helm chart for Kubernetes name: kafka-exporter -version: 1.0.0 +version: 1.1.0 home: https://github.com/abhishekjiitr/kafka-exporter-helm maintainers: - name: abhishekjiitr From 450968d275ea5698776f12dbb0f037e196f0fe4f Mon Sep 17 00:00:00 2001 From: Mikhail Vladimirovich Nacharov Date: Wed, 24 Feb 2021 17:47:38 +0500 Subject: [PATCH 09/71] fix: liveness and readiness probes added to k8s specs --- .../kafka-exporter/templates/deployment.yaml | 21 +++++++++++++++++++ kafka_exporter.go | 4 ++++ 2 files changed, 25 insertions(+) diff --git a/charts/kafka-exporter/templates/deployment.yaml b/charts/kafka-exporter/templates/deployment.yaml index 69563bd2..f0805140 100644 --- a/charts/kafka-exporter/templates/deployment.yaml +++ 
b/charts/kafka-exporter/templates/deployment.yaml @@ -56,6 +56,27 @@ spec: - name: metrics containerPort: 9308 protocol: TCP + livenessProbe: + failureThreshold: 1 + httpGet: + path: /healthz + port: metrics + scheme: HTTP + initialDelaySeconds: 3 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 9 + readinessProbe: + failureThreshold: 1 + httpGet: + path: /healthz + port: metrics + scheme: HTTP + initialDelaySeconds: 3 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 9 + {{- if .Values.kafkaExporter.tls.enabled }} volumeMounts: - name: tls-certs diff --git a/kafka_exporter.go b/kafka_exporter.go index d1d76413..fc438509 100644 --- a/kafka_exporter.go +++ b/kafka_exporter.go @@ -637,6 +637,10 @@ func main() { `)) }) + http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + // need more specific sarama check + w.Write([]byte("ok")) + }) plog.Infoln("Listening on", *listenAddress) plog.Fatal(http.ListenAndServe(*listenAddress, nil)) From a09bb01a230fed7ab42f2fce6c43c7650170515a Mon Sep 17 00:00:00 2001 From: danielqsj Date: Tue, 16 Mar 2021 12:03:39 +0800 Subject: [PATCH 10/71] upgrade sarama to 1.28.0, go to 1.16 --- .DS_Store | Bin 8196 -> 8196 bytes .travis.yml | 2 +- Makefile | 2 +- go.mod | 40 +- go.sum | 691 + original.log | 744 - .../Shopify/sarama/.github/CONTRIBUTING.md | 31 - .../Shopify/sarama/.github/ISSUE_TEMPLATE.md | 20 - vendor/github.com/Shopify/sarama/.gitignore | 2 + .../github.com/Shopify/sarama/.golangci.yml | 77 + vendor/github.com/Shopify/sarama/.travis.yml | 36 - vendor/github.com/Shopify/sarama/CHANGELOG.md | 426 + vendor/github.com/Shopify/sarama/Makefile | 49 +- vendor/github.com/Shopify/sarama/README.md | 27 +- vendor/github.com/Shopify/sarama/Vagrantfile | 10 +- .../github.com/Shopify/sarama/acl_bindings.go | 29 +- .../Shopify/sarama/acl_create_request.go | 29 +- .../Shopify/sarama/acl_create_request_test.go | 34 - .../Shopify/sarama/acl_create_response.go | 12 +- 
.../sarama/acl_create_response_test.go | 41 - .../Shopify/sarama/acl_delete_request.go | 18 +- .../Shopify/sarama/acl_delete_request_test.go | 69 - .../Shopify/sarama/acl_delete_response.go | 36 +- .../sarama/acl_delete_response_test.go | 38 - .../Shopify/sarama/acl_describe_request.go | 18 +- .../sarama/acl_describe_request_test.go | 35 - .../Shopify/sarama/acl_describe_response.go | 15 +- .../sarama/acl_describe_response_test.go | 45 - .../github.com/Shopify/sarama/acl_filter.go | 29 +- vendor/github.com/Shopify/sarama/acl_types.go | 69 +- .../sarama/add_offsets_to_txn_request.go | 5 + .../sarama/add_offsets_to_txn_request_test.go | 23 - .../sarama/add_offsets_to_txn_response.go | 5 + .../add_offsets_to_txn_response_test.go | 22 - .../sarama/add_partitions_to_txn_request.go | 5 + .../add_partitions_to_txn_request_test.go | 27 - .../sarama/add_partitions_to_txn_response.go | 6 + .../add_partitions_to_txn_response_test.go | 31 - vendor/github.com/Shopify/sarama/admin.go | 682 +- .../github.com/Shopify/sarama/admin_test.go | 501 - .../Shopify/sarama/alter_configs_request.go | 50 +- .../sarama/alter_configs_request_test.go | 86 - .../Shopify/sarama/alter_configs_response.go | 46 +- .../sarama/alter_configs_response_test.go | 45 - .../alter_partition_reassignments_request.go | 130 + .../alter_partition_reassignments_response.go | 157 + .../Shopify/sarama/api_versions_request.go | 15 +- .../sarama/api_versions_request_test.go | 14 - .../Shopify/sarama/api_versions_response.go | 6 + .../sarama/api_versions_response_test.go | 32 - .../Shopify/sarama/async_producer.go | 390 +- .../Shopify/sarama/async_producer_test.go | 845 - .../Shopify/sarama/balance_strategy.go | 1032 +- .../Shopify/sarama/balance_strategy_test.go | 102 - vendor/github.com/Shopify/sarama/broker.go | 733 +- .../github.com/Shopify/sarama/broker_test.go | 358 - vendor/github.com/Shopify/sarama/client.go | 288 +- .../github.com/Shopify/sarama/client_test.go | 661 - .../Shopify/sarama/client_tls_test.go | 
206 - vendor/github.com/Shopify/sarama/compress.go | 194 + vendor/github.com/Shopify/sarama/config.go | 238 +- .../Shopify/sarama/config_resource_type.go | 17 +- .../github.com/Shopify/sarama/config_test.go | 255 - vendor/github.com/Shopify/sarama/consumer.go | 284 +- .../Shopify/sarama/consumer_group.go | 234 +- .../Shopify/sarama/consumer_group_members.go | 2 + .../sarama/consumer_group_members_test.go | 73 - .../Shopify/sarama/consumer_group_test.go | 58 - .../sarama/consumer_metadata_request.go | 5 + .../sarama/consumer_metadata_request_test.go | 23 - .../sarama/consumer_metadata_response.go | 5 + .../sarama/consumer_metadata_response_test.go | 44 - .../Shopify/sarama/consumer_test.go | 1036 - .../Shopify/sarama/control_record.go | 72 + .../github.com/Shopify/sarama/crc32_field.go | 17 + .../sarama/create_partitions_request.go | 4 + .../sarama/create_partitions_request_test.go | 50 - .../sarama/create_partitions_response.go | 17 +- .../sarama/create_partitions_response_test.go | 52 - .../Shopify/sarama/create_topics_request.go | 4 + .../sarama/create_topics_request_test.go | 50 - .../Shopify/sarama/create_topics_response.go | 17 +- .../sarama/create_topics_response_test.go | 52 - .../github.com/Shopify/sarama/decompress.go | 63 + .../Shopify/sarama/delete_groups_request.go | 4 + .../sarama/delete_groups_request_test.go | 34 - .../Shopify/sarama/delete_groups_response.go | 4 + .../sarama/delete_groups_response_test.go | 57 - .../Shopify/sarama/delete_records_request.go | 4 + .../sarama/delete_records_request_test.go | 36 - .../Shopify/sarama/delete_records_response.go | 4 + .../sarama/delete_records_response_test.go | 39 - .../Shopify/sarama/delete_topics_request.go | 4 + .../sarama/delete_topics_request_test.go | 33 - .../Shopify/sarama/delete_topics_response.go | 4 + .../sarama/delete_topics_response_test.go | 36 - .../sarama/describe_configs_request.go | 37 +- .../sarama/describe_configs_request_test.go | 90 - .../sarama/describe_configs_response.go | 163 +- 
.../sarama/describe_configs_response_test.go | 60 - .../Shopify/sarama/describe_groups_request.go | 4 + .../sarama/describe_groups_request_test.go | 34 - .../sarama/describe_groups_response.go | 4 + .../sarama/describe_groups_response_test.go | 91 - .../sarama/describe_log_dirs_request.go | 87 + .../sarama/describe_log_dirs_response.go | 229 + vendor/github.com/Shopify/sarama/dev.yml | 2 +- .../Shopify/sarama/docker-compose.yml | 134 + .../Shopify/sarama/encoder_decoder.go | 5 + .../Shopify/sarama/end_txn_request.go | 4 + .../Shopify/sarama/end_txn_request_test.go | 23 - .../Shopify/sarama/end_txn_response.go | 4 + .../Shopify/sarama/end_txn_response_test.go | 22 - vendor/github.com/Shopify/sarama/errors.go | 107 + .../Shopify/sarama/examples/README.md | 9 - .../sarama/examples/http_server/.gitignore | 2 - .../sarama/examples/http_server/README.md | 7 - .../examples/http_server/http_server.go | 247 - .../examples/http_server/http_server_test.go | 109 - .../Shopify/sarama/fetch_request.go | 157 +- .../Shopify/sarama/fetch_request_test.go | 48 - .../Shopify/sarama/fetch_response.go | 188 +- .../Shopify/sarama/fetch_response_test.go | 331 - .../sarama/find_coordinator_request.go | 8 +- .../sarama/find_coordinator_request_test.go | 33 - .../sarama/find_coordinator_response.go | 4 + .../sarama/find_coordinator_response_test.go | 83 - .../Shopify/sarama/functional_client_test.go | 90 - .../sarama/functional_consumer_group_test.go | 418 - .../sarama/functional_consumer_test.go | 226 - .../sarama/functional_offset_manager_test.go | 47 - .../sarama/functional_producer_test.go | 323 - .../Shopify/sarama/functional_test.go | 148 - vendor/github.com/Shopify/sarama/go.mod | 28 + vendor/github.com/Shopify/sarama/go.sum | 98 + .../Shopify/sarama/gssapi_kerberos.go | 258 + .../Shopify/sarama/heartbeat_request.go | 4 + .../Shopify/sarama/heartbeat_request_test.go | 21 - .../Shopify/sarama/heartbeat_response.go | 4 + .../Shopify/sarama/heartbeat_response_test.go | 18 - 
.../sarama/init_producer_id_request.go | 4 + .../sarama/init_producer_id_request_test.go | 31 - .../sarama/init_producer_id_response.go | 4 + .../sarama/init_producer_id_response_test.go | 37 - .../github.com/Shopify/sarama/interceptors.go | 43 + .../Shopify/sarama/join_group_request.go | 4 + .../Shopify/sarama/join_group_request_test.go | 83 - .../Shopify/sarama/join_group_response.go | 4 + .../sarama/join_group_response_test.go | 172 - .../Shopify/sarama/kerberos_client.go | 46 + .../Shopify/sarama/leave_group_request.go | 4 + .../sarama/leave_group_request_test.go | 19 - .../Shopify/sarama/leave_group_response.go | 4 + .../sarama/leave_group_response_test.go | 24 - .../github.com/Shopify/sarama/length_field.go | 19 +- .../Shopify/sarama/list_groups_request.go | 4 + .../sarama/list_groups_request_test.go | 7 - .../Shopify/sarama/list_groups_response.go | 4 + .../sarama/list_groups_response_test.go | 58 - .../list_partition_reassignments_request.go | 98 + .../list_partition_reassignments_response.go | 169 + vendor/github.com/Shopify/sarama/message.go | 133 +- .../github.com/Shopify/sarama/message_set.go | 5 +- .../github.com/Shopify/sarama/message_test.go | 196 - .../Shopify/sarama/metadata_request.go | 15 +- .../Shopify/sarama/metadata_request_test.go | 76 - .../Shopify/sarama/metadata_response.go | 8 +- .../Shopify/sarama/metadata_response_test.go | 290 - vendor/github.com/Shopify/sarama/metrics.go | 8 - .../github.com/Shopify/sarama/metrics_test.go | 172 - .../github.com/Shopify/sarama/mockbroker.go | 195 +- .../github.com/Shopify/sarama/mockkerberos.go | 123 + .../Shopify/sarama/mockresponses.go | 630 +- .../github.com/Shopify/sarama/mocks/README.md | 13 - .../Shopify/sarama/mocks/async_producer.go | 173 - .../sarama/mocks/async_producer_test.go | 132 - .../Shopify/sarama/mocks/consumer.go | 315 - .../Shopify/sarama/mocks/consumer_test.go | 249 - .../github.com/Shopify/sarama/mocks/mocks.go | 48 - .../Shopify/sarama/mocks/sync_producer.go | 157 - 
.../sarama/mocks/sync_producer_test.go | 250 - .../Shopify/sarama/offset_commit_request.go | 16 +- .../sarama/offset_commit_request_test.go | 90 - .../Shopify/sarama/offset_commit_response.go | 33 +- .../sarama/offset_commit_response_test.go | 24 - .../Shopify/sarama/offset_fetch_request.go | 43 +- .../sarama/offset_fetch_request_test.go | 31 - .../Shopify/sarama/offset_fetch_response.go | 118 +- .../sarama/offset_fetch_response_test.go | 22 - .../Shopify/sarama/offset_manager.go | 47 +- .../Shopify/sarama/offset_manager_test.go | 421 - .../Shopify/sarama/offset_request.go | 40 +- .../Shopify/sarama/offset_request_test.go | 43 - .../Shopify/sarama/offset_response.go | 4 + .../Shopify/sarama/offset_response_test.go | 111 - .../Shopify/sarama/packet_decoder.go | 7 + .../Shopify/sarama/packet_encoder.go | 7 + .../Shopify/sarama/partitioner_test.go | 283 - .../github.com/Shopify/sarama/prep_encoder.go | 49 + .../Shopify/sarama/produce_request.go | 6 + .../Shopify/sarama/produce_request_test.go | 106 - .../Shopify/sarama/produce_response.go | 63 +- .../Shopify/sarama/produce_response_test.go | 128 - .../github.com/Shopify/sarama/produce_set.go | 49 +- .../Shopify/sarama/produce_set_test.go | 255 - .../github.com/Shopify/sarama/real_decoder.go | 106 +- .../github.com/Shopify/sarama/real_encoder.go | 52 + vendor/github.com/Shopify/sarama/record.go | 9 +- .../github.com/Shopify/sarama/record_batch.go | 89 +- .../github.com/Shopify/sarama/record_test.go | 292 - vendor/github.com/Shopify/sarama/records.go | 17 +- .../github.com/Shopify/sarama/records_test.go | 143 - vendor/github.com/Shopify/sarama/request.go | 72 +- .../github.com/Shopify/sarama/request_test.go | 105 - .../Shopify/sarama/response_header.go | 12 +- .../Shopify/sarama/response_header_test.go | 21 - vendor/github.com/Shopify/sarama/sarama.go | 61 +- .../sarama/sasl_authenticate_request.go | 33 + .../sarama/sasl_authenticate_response.go | 48 + .../Shopify/sarama/sasl_handshake_request.go | 7 +- 
.../sarama/sasl_handshake_request_test.go | 17 - .../Shopify/sarama/sasl_handshake_response.go | 4 + .../sarama/sasl_handshake_response_test.go | 24 - .../sarama/sticky_assignor_user_data.go | 124 + .../Shopify/sarama/sync_group_request.go | 4 + .../Shopify/sarama/sync_group_request_test.go | 38 - .../Shopify/sarama/sync_group_response.go | 4 + .../sarama/sync_group_response_test.go | 40 - .../Shopify/sarama/sync_producer_test.go | 199 - .../github.com/Shopify/sarama/tools/README.md | 10 - .../tools/kafka-console-consumer/.gitignore | 2 - .../tools/kafka-console-consumer/README.md | 29 - .../kafka-console-consumer.go | 145 - .../.gitignore | 2 - .../kafka-console-partitionconsumer/README.md | 28 - .../kafka-console-partitionconsumer.go | 102 - .../tools/kafka-console-producer/.gitignore | 2 - .../tools/kafka-console-producer/README.md | 34 - .../kafka-console-producer.go | 124 - .../sarama/txn_offset_commit_request.go | 4 + .../sarama/txn_offset_commit_request_test.go | 35 - .../sarama/txn_offset_commit_response.go | 4 + .../sarama/txn_offset_commit_response_test.go | 31 - vendor/github.com/Shopify/sarama/utils.go | 36 +- .../github.com/Shopify/sarama/utils_test.go | 41 - .../Shopify/sarama/vagrant/boot_cluster.sh | 22 - .../Shopify/sarama/vagrant/create_topics.sh | 8 - .../Shopify/sarama/vagrant/halt_cluster.sh | 15 - .../Shopify/sarama/vagrant/install_cluster.sh | 49 - .../Shopify/sarama/vagrant/kafka.conf | 9 - .../Shopify/sarama/vagrant/provision.sh | 15 - .../Shopify/sarama/vagrant/run_toxiproxy.sh | 22 - .../Shopify/sarama/vagrant/server.properties | 127 - .../Shopify/sarama/vagrant/setup_services.sh | 29 - .../Shopify/sarama/vagrant/toxiproxy.conf | 6 - .../Shopify/sarama/vagrant/zookeeper.conf | 7 - .../sarama/vagrant/zookeeper.properties | 36 - vendor/github.com/Shopify/sarama/zstd.go | 28 + .../alecthomas/template/example_test.go | 72 - .../alecthomas/template/examplefiles_test.go | 183 - .../alecthomas/template/examplefunc_test.go | 55 - 
.../alecthomas/template/exec_test.go | 1044 - vendor/github.com/alecthomas/template/go.mod | 1 + .../alecthomas/template/multi_test.go | 293 - .../alecthomas/template/parse/lex_test.go | 468 - .../alecthomas/template/parse/parse_test.go | 426 - .../alecthomas/template/testdata/file1.tmpl | 2 - .../alecthomas/template/testdata/file2.tmpl | 2 - .../alecthomas/template/testdata/tmpl1.tmpl | 3 - .../alecthomas/template/testdata/tmpl2.tmpl | 3 - vendor/github.com/alecthomas/units/bytes.go | 10 +- .../github.com/alecthomas/units/bytes_test.go | 49 - vendor/github.com/alecthomas/units/go.mod | 5 + vendor/github.com/alecthomas/units/go.sum | 11 + vendor/github.com/alecthomas/units/si.go | 30 +- vendor/github.com/beorn7/perks/.gitignore | 2 - vendor/github.com/beorn7/perks/README.md | 31 - .../beorn7/perks/histogram/bench_test.go | 26 - .../beorn7/perks/histogram/histogram.go | 108 - .../beorn7/perks/histogram/histogram_test.go | 38 - .../beorn7/perks/quantile/bench_test.go | 63 - .../beorn7/perks/quantile/example_test.go | 121 - .../beorn7/perks/quantile/stream_test.go | 215 - vendor/github.com/beorn7/perks/topk/topk.go | 90 - .../github.com/beorn7/perks/topk/topk_test.go | 57 - .../github.com/cespare/xxhash/v2/.travis.yml | 8 + .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/v2/README.md | 67 + vendor/github.com/cespare/xxhash/v2/go.mod | 3 + .../xxhash/v2/go.sum} | 0 vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 + .../cespare/xxhash/v2/xxhash_amd64.go | 13 + .../cespare/xxhash/v2/xxhash_amd64.s | 215 + .../cespare/xxhash/v2/xxhash_other.go | 76 + .../cespare/xxhash/v2/xxhash_safe.go | 15 + .../cespare/xxhash/v2/xxhash_unsafe.go | 46 + vendor/github.com/davecgh/go-spew/.gitignore | 22 - vendor/github.com/davecgh/go-spew/.travis.yml | 28 - vendor/github.com/davecgh/go-spew/README.md | 201 - .../github.com/davecgh/go-spew/cov_report.sh | 22 - .../davecgh/go-spew/spew/common_test.go | 298 - .../davecgh/go-spew/spew/dump_test.go | 
1042 - .../davecgh/go-spew/spew/dumpcgo_test.go | 101 - .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 - .../davecgh/go-spew/spew/example_test.go | 226 - .../davecgh/go-spew/spew/format_test.go | 1558 - .../davecgh/go-spew/spew/internal_test.go | 84 - .../go-spew/spew/internalunsafe_test.go | 101 - .../davecgh/go-spew/spew/spew_test.go | 320 - .../davecgh/go-spew/spew/testdata/dumpcgo.go | 82 - .../davecgh/go-spew/test_coverage.txt | 61 - .../eapache/go-resiliency/.gitignore | 24 - .../eapache/go-resiliency/.travis.yml | 6 - .../eapache/go-resiliency/CHANGELOG.md | 11 - .../eapache/go-resiliency/README.md | 21 - .../eapache/go-resiliency/batcher/README.md | 31 - .../eapache/go-resiliency/batcher/batcher.go | 108 - .../go-resiliency/batcher/batcher_test.go | 123 - .../go-resiliency/breaker/breaker_test.go | 196 - .../eapache/go-resiliency/deadline/README.md | 27 - .../go-resiliency/deadline/deadline.go | 45 - .../go-resiliency/deadline/deadline_test.go | 65 - .../eapache/go-resiliency/retrier/README.md | 26 - .../eapache/go-resiliency/retrier/backoffs.go | 24 - .../go-resiliency/retrier/backoffs_test.go | 55 - .../go-resiliency/retrier/classifier.go | 66 - .../go-resiliency/retrier/classifier_test.go | 66 - .../eapache/go-resiliency/retrier/retrier.go | 69 - .../go-resiliency/retrier/retrier_test.go | 129 - .../eapache/go-resiliency/semaphore/README.md | 22 - .../go-resiliency/semaphore/semaphore.go | 52 - .../go-resiliency/semaphore/semaphore_test.go | 81 - ...020dfb19a68cbcf99dc93dc1030068d4c9968ad0-2 | Bin 8 -> 0 bytes ...05979b224be0294bf350310d4ba5257c9bb815db-3 | 1 - ...0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9 | Bin 56 -> 0 bytes .../eapache/go-xerial-snappy/corpus/1 | Bin 31 -> 0 bytes ...361a1c6d2a8f80780826c3d83ad391d0475c922f-4 | Bin 50 -> 0 bytes ...117af68228fa64339d362cf980c68ffadff96c8-12 | Bin 248 -> 0 bytes ...4142249be82c8a617cf838eef05394ece39becd3-9 | Bin 76 -> 0 bytes ...1ea8c7d904f1cd913b52e9ead4a96c639d76802-10 | Bin 110 -> 0 bytes 
...44083e1447694980c0ee682576e32358c9ee883f-2 | Bin 40 -> 0 bytes ...4d6b359bd538feaa7d36c89235d07d0a443797ac-1 | Bin 29 -> 0 bytes ...521e7e67b6063a75e0eeb24b0d1dd20731d34ad8-4 | 1 - ...526e6f85d1b8777f0d9f70634c9f8b77fbdccdff-7 | Bin 61 -> 0 bytes .../581b8fe7088f921567811fdf30e1f527c9f48e5e | 1 - ...0cd10738158020f5843b43960158c3d116b3a71-11 | Bin 195 -> 0 bytes ...652b031b4b9d601235f86ef62523e63d733b8623-3 | Bin 45 -> 0 bytes ...684a011f6fdfc7ae9863e12381165e82d2a2e356-9 | Bin 111 -> 0 bytes ...72e42fc8e5eaed6a8a077f420fc3bd1f9a7c0919-1 | Bin 8 -> 0 bytes ...80881d1b911b95e0203b3b0e7dc6360c35f7620f-7 | 1 - ...8484b3082d522e0a1f315db1fa1b2a5118be7cc3-8 | Bin 81 -> 0 bytes ...9635bb09260f100bc4a2ee4e3b980fecc5b874ce-1 | Bin 8 -> 0 bytes ...99d36b0b5b1be7151a508dd440ec725a2576c41c-1 | Bin 8 -> 0 bytes ...9d339eddb4e2714ea319c3fb571311cb95fdb067-6 | Bin 55 -> 0 bytes ...b2419fcb7a9aef359de67cb6bd2b8a8c1f5c100f-4 | 1 - ...c1951b29109ec1017f63535ce3699630f46f54e1-5 | Bin 50 -> 0 bytes ...cb806bc4f67316af02d6ae677332a3b6005a18da-5 | 1 - ...d7dd228703739e9252c7ea76f1c5f82ab44686a-10 | Bin 96 -> 0 bytes ...ce3671e91907349cea04fc3f2a4b91c65b99461d-3 | Bin 36 -> 0 bytes ...ce3c6f4c31f74d72fbf74c17d14a8d29aa62059e-6 | 1 - ...e2230aa0ecaebb9b890440effa13f501a89247b2-1 | Bin 35 -> 0 bytes ...fa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11 | Bin 116 -> 0 bytes ...f0445ac39e03978bbc8011316ac8468015ddb72c-1 | Bin 20 -> 0 bytes ...f241da53c6bc1fe3368c55bf28db86ce15a2c784-2 | Bin 20 -> 0 bytes .../eapache/go-xerial-snappy/snappy_test.go | 249 - vendor/github.com/eapache/queue/queue_test.go | 178 - .../.github/ISSUE_TEMPLATE/bug_report.md | 20 - .../.github/ISSUE_TEMPLATE/feature_request.md | 17 - .../.github/ISSUE_TEMPLATE/question.md | 7 - vendor/github.com/golang/protobuf/.gitignore | 17 - vendor/github.com/golang/protobuf/.travis.yml | 31 - vendor/github.com/golang/protobuf/Makefile | 50 - vendor/github.com/golang/protobuf/README.md | 281 - .../golang/protobuf/conformance/Makefile 
| 49 - .../protobuf/conformance/conformance.go | 154 - .../protobuf/conformance/conformance.sh | 4 - .../protobuf/conformance/failure_list_go.txt | 61 - .../conformance_proto/conformance.pb.go | 1834 - .../conformance_proto/conformance.proto | 273 - .../golang/protobuf/conformance/test.sh | 26 - .../golang/protobuf/descriptor/descriptor.go | 93 - .../protobuf/descriptor/descriptor_test.go | 32 - .../golang/protobuf/jsonpb/jsonpb.go | 1271 - .../golang/protobuf/jsonpb/jsonpb_test.go | 1231 - .../jsonpb_test_proto/more_test_objects.pb.go | 368 - .../jsonpb_test_proto/more_test_objects.proto | 69 - .../jsonpb_test_proto/test_objects.pb.go | 1357 - .../jsonpb_test_proto/test_objects.proto | 179 - .../golang/protobuf/proto/all_test.go | 2492 - .../golang/protobuf/proto/any_test.go | 300 - .../golang/protobuf/proto/buffer.go | 324 + .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/clone_test.go | 390 - .../golang/protobuf/proto/decode.go | 428 - .../golang/protobuf/proto/decode_test.go | 255 - .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 113 + .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/discard_test.go | 170 - .../golang/protobuf/proto/encode.go | 203 - .../golang/protobuf/proto/encode_test.go | 85 - .../github.com/golang/protobuf/proto/equal.go | 300 - .../golang/protobuf/proto/equal_test.go | 244 - .../golang/protobuf/proto/extensions.go | 715 +- .../golang/protobuf/proto/extensions_test.go | 688 - .../github.com/golang/protobuf/proto/lib.go | 979 - .../golang/protobuf/proto/map_test.go | 70 - .../golang/protobuf/proto/message_set.go | 314 - .../golang/protobuf/proto/message_set_test.go | 77 - .../golang/protobuf/proto/pointer_reflect.go | 357 - .../golang/protobuf/proto/pointer_unsafe.go | 308 - .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../protobuf/proto/proto3_proto/proto3.pb.go | 611 - 
.../protobuf/proto/proto3_proto/proto3.proto | 97 - .../golang/protobuf/proto/proto3_test.go | 151 - .../golang/protobuf/proto/registry.go | 323 + .../golang/protobuf/proto/size2_test.go | 63 - .../golang/protobuf/proto/size_test.go | 191 - .../golang/protobuf/proto/table_marshal.go | 2767 - .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2051 - .../protobuf/proto/test_proto/test.pb.go | 5314 - .../protobuf/proto/test_proto/test.proto | 570 - .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../golang/protobuf/proto/text_parser.go | 880 - .../golang/protobuf/proto/text_parser_test.go | 706 - .../golang/protobuf/proto/text_test.go | 518 - .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../protoc-gen-go/descriptor/descriptor.pb.go | 2812 - .../protoc-gen-go/descriptor/descriptor.proto | 872 - .../golang/protobuf/protoc-gen-go/doc.go | 51 - .../protoc-gen-go/generator/generator.go | 3086 - .../generator/internal/remap/remap.go | 117 - .../generator/internal/remap/remap_test.go | 82 - .../protoc-gen-go/generator/name_test.go | 115 - .../protobuf/protoc-gen-go/golden_test.go | 422 - .../protobuf/protoc-gen-go/grpc/grpc.go | 484 - .../protobuf/protoc-gen-go/link_grpc.go | 34 - .../golang/protobuf/protoc-gen-go/main.go | 98 - .../protoc-gen-go/plugin/plugin.pb.go | 369 - .../protoc-gen-go/plugin/plugin.pb.golden | 83 - .../protoc-gen-go/plugin/plugin.proto | 167 - .../testdata/deprecated/deprecated.pb.go | 234 - .../testdata/deprecated/deprecated.proto | 69 - .../extension_base/extension_base.pb.go | 139 - .../extension_base/extension_base.proto | 48 - .../extension_extra/extension_extra.pb.go | 78 - .../extension_extra/extension_extra.proto | 40 - .../protoc-gen-go/testdata/extension_test.go | 206 - .../extension_user/extension_user.pb.go | 401 - 
.../extension_user/extension_user.proto | 102 - .../protoc-gen-go/testdata/grpc/grpc.pb.go | 444 - .../protoc-gen-go/testdata/grpc/grpc.proto | 61 - .../testdata/import_public/a.pb.go | 110 - .../testdata/import_public/a.proto | 45 - .../testdata/import_public/b.pb.go | 87 - .../testdata/import_public/b.proto | 43 - .../testdata/import_public/sub/a.pb.go | 100 - .../testdata/import_public/sub/a.proto | 47 - .../testdata/import_public/sub/b.pb.go | 67 - .../testdata/import_public/sub/b.proto | 39 - .../testdata/import_public_test.go | 66 - .../testdata/imports/fmt/m.pb.go | 66 - .../testdata/imports/fmt/m.proto | 35 - .../testdata/imports/test_a_1/m1.pb.go | 130 - .../testdata/imports/test_a_1/m1.proto | 44 - .../testdata/imports/test_a_1/m2.pb.go | 67 - .../testdata/imports/test_a_1/m2.proto | 35 - .../testdata/imports/test_a_2/m3.pb.go | 67 - .../testdata/imports/test_a_2/m3.proto | 35 - .../testdata/imports/test_a_2/m4.pb.go | 67 - .../testdata/imports/test_a_2/m4.proto | 35 - .../testdata/imports/test_b_1/m1.pb.go | 67 - .../testdata/imports/test_b_1/m1.proto | 35 - .../testdata/imports/test_b_1/m2.pb.go | 67 - .../testdata/imports/test_b_1/m2.proto | 35 - .../testdata/imports/test_import_a1m1.pb.go | 80 - .../testdata/imports/test_import_a1m1.proto | 42 - .../testdata/imports/test_import_a1m2.pb.go | 80 - .../testdata/imports/test_import_a1m2.proto | 42 - .../testdata/imports/test_import_all.pb.go | 138 - .../testdata/imports/test_import_all.proto | 58 - .../protoc-gen-go/testdata/main_test.go | 48 - .../protoc-gen-go/testdata/multi/multi1.pb.go | 96 - .../protoc-gen-go/testdata/multi/multi1.proto | 46 - .../protoc-gen-go/testdata/multi/multi2.pb.go | 128 - .../protoc-gen-go/testdata/multi/multi2.proto | 48 - .../protoc-gen-go/testdata/multi/multi3.pb.go | 115 - .../protoc-gen-go/testdata/multi/multi3.proto | 45 - .../protoc-gen-go/testdata/my_test/test.pb.go | 1192 - .../protoc-gen-go/testdata/my_test/test.proto | 158 - .../testdata/proto3/proto3.pb.go | 196 - 
.../testdata/proto3/proto3.proto | 55 - .../github.com/golang/protobuf/ptypes/any.go | 214 +- .../golang/protobuf/ptypes/any/any.pb.go | 227 +- .../golang/protobuf/ptypes/any/any.proto | 149 - .../golang/protobuf/ptypes/any_test.go | 154 - .../github.com/golang/protobuf/ptypes/doc.go | 37 +- .../golang/protobuf/ptypes/duration.go | 116 +- .../protobuf/ptypes/duration/duration.pb.go | 196 +- .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/duration_test.go | 121 - .../golang/protobuf/ptypes/empty/empty.pb.go | 79 - .../golang/protobuf/ptypes/empty/empty.proto | 52 - .../protobuf/ptypes/struct/struct.pb.go | 450 - .../protobuf/ptypes/struct/struct.proto | 96 - .../golang/protobuf/ptypes/timestamp.go | 115 +- .../protobuf/ptypes/timestamp/timestamp.pb.go | 213 +- .../protobuf/ptypes/timestamp/timestamp.proto | 133 - .../golang/protobuf/ptypes/timestamp_test.go | 153 - .../protobuf/ptypes/wrappers/wrappers.pb.go | 443 - .../protobuf/ptypes/wrappers/wrappers.proto | 118 - .../github.com/golang/protobuf/regenerate.sh | 53 - vendor/github.com/golang/snappy/AUTHORS | 2 + vendor/github.com/golang/snappy/CONTRIBUTORS | 2 + .../golang/snappy/cmd/snappytool/main.go | 46 - vendor/github.com/golang/snappy/decode.go | 4 + .../github.com/golang/snappy/decode_arm64.s | 503 + vendor/github.com/golang/snappy/decode_asm.go | 15 + .../github.com/golang/snappy/decode_other.go | 24 +- vendor/github.com/golang/snappy/encode.go | 4 + .../github.com/golang/snappy/encode_arm64.s | 729 + vendor/github.com/golang/snappy/encode_asm.go | 30 + .../github.com/golang/snappy/encode_other.go | 2 +- vendor/github.com/golang/snappy/go.mod | 1 + .../github.com/golang/snappy/golden_test.go | 1965 - vendor/github.com/golang/snappy/misc/main.cpp | 79 - .../github.com/golang/snappy/snappy_test.go | 1353 - .../snappy/testdata/Mark.Twain-Tom.Sawyer.txt | 396 - .../Mark.Twain-Tom.Sawyer.txt.rawsnappy | Bin 9871 -> 0 bytes .../github.com/hashicorp/go-uuid/.travis.yml | 12 + 
vendor/github.com/hashicorp/go-uuid/LICENSE | 363 + vendor/github.com/hashicorp/go-uuid/README.md | 8 + vendor/github.com/hashicorp/go-uuid/go.mod | 1 + vendor/github.com/hashicorp/go-uuid/uuid.go | 83 + vendor/github.com/jcmturner/aescts/v2/LICENSE | 201 + .../github.com/jcmturner/aescts/v2/aescts.go | 186 + vendor/github.com/jcmturner/aescts/v2/go.mod | 5 + vendor/github.com/jcmturner/aescts/v2/go.sum | 10 + .../github.com/jcmturner/dnsutils/v2/LICENSE | 201 + .../github.com/jcmturner/dnsutils/v2/go.mod | 5 + .../github.com/jcmturner/dnsutils/v2/go.sum | 10 + .../github.com/jcmturner/dnsutils/v2/srv.go | 95 + vendor/github.com/jcmturner/gofork/LICENSE | 27 + .../jcmturner/gofork/encoding/asn1/README.md | 5 + .../jcmturner/gofork/encoding/asn1/asn1.go | 1003 + .../jcmturner/gofork/encoding/asn1/common.go | 173 + .../jcmturner/gofork/encoding/asn1/marshal.go | 659 + .../gofork/x/crypto/pbkdf2/pbkdf2.go | 98 + vendor/github.com/jcmturner/gokrb5/v8/LICENSE | 201 + .../jcmturner/gokrb5/v8/asn1tools/tools.go | 86 + .../jcmturner/gokrb5/v8/client/ASExchange.go | 182 + .../jcmturner/gokrb5/v8/client/TGSExchange.go | 103 + .../jcmturner/gokrb5/v8/client/cache.go | 134 + .../jcmturner/gokrb5/v8/client/client.go | 329 + .../jcmturner/gokrb5/v8/client/network.go | 218 + .../jcmturner/gokrb5/v8/client/passwd.go | 75 + .../jcmturner/gokrb5/v8/client/session.go | 295 + .../jcmturner/gokrb5/v8/client/settings.go | 93 + .../jcmturner/gokrb5/v8/config/error.go | 30 + .../jcmturner/gokrb5/v8/config/hosts.go | 141 + .../jcmturner/gokrb5/v8/config/krb5conf.go | 728 + .../jcmturner/gokrb5/v8/credentials/ccache.go | 333 + .../gokrb5/v8/credentials/credentials.go | 405 + .../v8/crypto/aes128-cts-hmac-sha1-96.go | 129 + .../v8/crypto/aes128-cts-hmac-sha256-128.go | 132 + .../v8/crypto/aes256-cts-hmac-sha1-96.go | 129 + .../v8/crypto/aes256-cts-hmac-sha384-192.go | 132 + .../gokrb5/v8/crypto/common/common.go | 132 + .../jcmturner/gokrb5/v8/crypto/crypto.go | 175 + 
.../gokrb5/v8/crypto/des3-cbc-sha1-kd.go | 139 + .../jcmturner/gokrb5/v8/crypto/etype/etype.go | 29 + .../jcmturner/gokrb5/v8/crypto/rc4-hmac.go | 133 + .../gokrb5/v8/crypto/rfc3961/encryption.go | 119 + .../gokrb5/v8/crypto/rfc3961/keyDerivation.go | 169 + .../gokrb5/v8/crypto/rfc3961/nfold.go | 107 + .../gokrb5/v8/crypto/rfc3962/encryption.go | 89 + .../gokrb5/v8/crypto/rfc3962/keyDerivation.go | 51 + .../gokrb5/v8/crypto/rfc4757/checksum.go | 40 + .../gokrb5/v8/crypto/rfc4757/encryption.go | 80 + .../gokrb5/v8/crypto/rfc4757/keyDerivation.go | 40 + .../gokrb5/v8/crypto/rfc4757/msgtype.go | 20 + .../gokrb5/v8/crypto/rfc8009/encryption.go | 125 + .../gokrb5/v8/crypto/rfc8009/keyDerivation.go | 135 + .../jcmturner/gokrb5/v8/gssapi/MICToken.go | 174 + .../jcmturner/gokrb5/v8/gssapi/README.md | 20 + .../gokrb5/v8/gssapi/contextFlags.go | 27 + .../jcmturner/gokrb5/v8/gssapi/gssapi.go | 202 + .../jcmturner/gokrb5/v8/gssapi/wrapToken.go | 195 + .../gokrb5/v8/iana/addrtype/constants.go | 15 + .../gokrb5/v8/iana/adtype/constants.go | 23 + .../gokrb5/v8/iana/asnAppTag/constants.go | 24 + .../gokrb5/v8/iana/chksumtype/constants.go | 32 + .../jcmturner/gokrb5/v8/iana/constants.go | 5 + .../gokrb5/v8/iana/errorcode/constants.go | 155 + .../gokrb5/v8/iana/etypeID/constants.go | 101 + .../gokrb5/v8/iana/flags/constants.go | 36 + .../gokrb5/v8/iana/keyusage/constants.go | 42 + .../gokrb5/v8/iana/msgtype/constants.go | 18 + .../gokrb5/v8/iana/nametype/constants.go | 15 + .../gokrb5/v8/iana/patype/constants.go | 77 + .../gokrb5/v8/kadmin/changepasswddata.go | 23 + .../jcmturner/gokrb5/v8/kadmin/message.go | 114 + .../jcmturner/gokrb5/v8/kadmin/passwd.go | 68 + .../jcmturner/gokrb5/v8/keytab/keytab.go | 530 + .../jcmturner/gokrb5/v8/krberror/error.go | 67 + .../jcmturner/gokrb5/v8/messages/APRep.go | 49 + .../jcmturner/gokrb5/v8/messages/APReq.go | 199 + .../jcmturner/gokrb5/v8/messages/KDCRep.go | 360 + .../jcmturner/gokrb5/v8/messages/KDCReq.go | 432 + 
.../jcmturner/gokrb5/v8/messages/KRBCred.go | 102 + .../jcmturner/gokrb5/v8/messages/KRBError.go | 94 + .../jcmturner/gokrb5/v8/messages/KRBPriv.go | 108 + .../jcmturner/gokrb5/v8/messages/KRBSafe.go | 43 + .../jcmturner/gokrb5/v8/messages/Ticket.go | 262 + .../jcmturner/gokrb5/v8/pac/client_claims.go | 34 + .../jcmturner/gokrb5/v8/pac/client_info.go | 31 + .../gokrb5/v8/pac/credentials_info.go | 86 + .../jcmturner/gokrb5/v8/pac/device_claims.go | 34 + .../jcmturner/gokrb5/v8/pac/device_info.go | 32 + .../gokrb5/v8/pac/kerb_validation_info.go | 110 + .../jcmturner/gokrb5/v8/pac/pac_type.go | 251 + .../gokrb5/v8/pac/s4u_delegation_info.go | 26 + .../jcmturner/gokrb5/v8/pac/signature_data.go | 67 + .../gokrb5/v8/pac/supplemental_cred.go | 87 + .../jcmturner/gokrb5/v8/pac/upn_dns_info.go | 73 + .../gokrb5/v8/types/Authenticator.go | 81 + .../gokrb5/v8/types/AuthorizationData.go | 55 + .../jcmturner/gokrb5/v8/types/Cryptosystem.go | 72 + .../jcmturner/gokrb5/v8/types/HostAddress.go | 180 + .../gokrb5/v8/types/KerberosFlags.go | 68 + .../jcmturner/gokrb5/v8/types/PAData.go | 155 + .../gokrb5/v8/types/PrincipalName.go | 67 + .../jcmturner/gokrb5/v8/types/TypedData.go | 18 + vendor/github.com/jcmturner/rpc/v2/LICENSE | 201 + .../jcmturner/rpc/v2/mstypes/claims.go | 152 + .../jcmturner/rpc/v2/mstypes/common.go | 12 + .../jcmturner/rpc/v2/mstypes/filetime.go | 52 + .../rpc/v2/mstypes/group_membership.go | 19 + .../rpc/v2/mstypes/kerb_sid_and_attributes.go | 23 + .../jcmturner/rpc/v2/mstypes/reader.go | 109 + .../rpc/v2/mstypes/rpc_unicode_string.go | 13 + .../jcmturner/rpc/v2/mstypes/sid.go | 36 + .../rpc/v2/mstypes/user_session_key.go | 11 + .../github.com/jcmturner/rpc/v2/ndr/arrays.go | 413 + .../jcmturner/rpc/v2/ndr/decoder.go | 393 + .../github.com/jcmturner/rpc/v2/ndr/error.go | 18 + .../github.com/jcmturner/rpc/v2/ndr/header.go | 116 + .../github.com/jcmturner/rpc/v2/ndr/pipe.go | 31 + .../jcmturner/rpc/v2/ndr/primitives.go | 211 + 
.../jcmturner/rpc/v2/ndr/rawbytes.go | 61 + .../jcmturner/rpc/v2/ndr/strings.go | 70 + .../github.com/jcmturner/rpc/v2/ndr/tags.go | 69 + .../github.com/jcmturner/rpc/v2/ndr/union.go | 57 + vendor/github.com/klauspost/compress/LICENSE | 28 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 168 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 684 + .../klauspost/compress/fse/decompress.go | 374 + .../github.com/klauspost/compress/fse/fse.go | 144 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 329 + .../klauspost/compress/huff0/bitwriter.go | 210 + .../klauspost/compress/huff0/bytereader.go | 54 + .../klauspost/compress/huff0/compress.go | 657 + .../klauspost/compress/huff0/decompress.go | 1164 + .../klauspost/compress/huff0/huff0.go | 273 + .../klauspost/compress/snappy/.gitignore | 16 + .../klauspost/compress/snappy/AUTHORS | 15 + .../klauspost/compress/snappy/CONTRIBUTORS | 37 + .../klauspost/compress/snappy/LICENSE | 27 + .../klauspost/compress/snappy/README | 107 + .../klauspost/compress/snappy/decode.go | 237 + .../compress}/snappy/decode_amd64.go | 0 .../klauspost/compress/snappy/decode_amd64.s | 482 + .../klauspost/compress/snappy/decode_other.go | 115 + .../klauspost/compress/snappy/encode.go | 285 + .../compress}/snappy/encode_amd64.go | 0 .../klauspost/compress/snappy/encode_amd64.s | 730 + .../klauspost/compress/snappy/encode_other.go | 238 + .../klauspost/compress/snappy/runbench.cmd | 2 + .../klauspost/compress/snappy/snappy.go | 98 + .../klauspost/compress/zstd/README.md | 417 + .../klauspost/compress/zstd/bitreader.go | 136 + .../klauspost/compress/zstd/bitwriter.go | 169 + .../klauspost/compress/zstd/blockdec.go | 739 + .../klauspost/compress/zstd/blockenc.go | 855 + .../compress/zstd/blocktype_string.go | 85 + 
.../klauspost/compress/zstd/bytebuf.go | 127 + .../klauspost/compress/zstd/bytereader.go | 88 + .../klauspost/compress/zstd/decodeheader.go | 202 + .../klauspost/compress/zstd/decoder.go | 561 + .../compress/zstd/decoder_options.go | 84 + .../klauspost/compress/zstd/dict.go | 122 + .../klauspost/compress/zstd/enc_base.go | 155 + .../klauspost/compress/zstd/enc_best.go | 484 + .../klauspost/compress/zstd/enc_better.go | 595 + .../klauspost/compress/zstd/enc_dfast.go | 713 + .../klauspost/compress/zstd/enc_fast.go | 661 + .../klauspost/compress/zstd/encoder.go | 570 + .../compress/zstd/encoder_options.go | 290 + .../klauspost/compress/zstd/framedec.go | 494 + .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 385 + .../klauspost/compress/zstd/fse_encoder.go | 726 + .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 77 + .../klauspost/compress/zstd/history.go | 89 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 58 + .../compress/zstd/internal/xxhash/xxhash.go | 238 + .../zstd/internal/xxhash/xxhash_amd64.go | 13 + .../zstd/internal/xxhash/xxhash_amd64.s | 215 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/seqdec.go | 492 + .../klauspost/compress/zstd/seqenc.go | 115 + .../klauspost/compress/zstd/snappy.go | 436 + .../klauspost/compress/zstd/zstd.go | 156 + .../go-windows-terminal-sequences/README.md | 2 + .../sequences.go | 3 +- .../sequences_dummy.go | 11 + .../sequences_test.go | 48 - .../kazoo-go/functional_cluster_test.go | 98 - .../kazoo-go/functional_consumergroup_test.go | 658 - .../kazoo-go/functional_topic_admin_test.go | 123 - .../functional_topic_metadata_test.go | 111 - .../krallistic/kazoo-go/kazoo_test.go | 56 - .../kazoo-go/tools/kafka-topics/.gitignore | 2 - .../tools/kafka-topics/kafka-topics.go | 93 - .../kazoo-go/topic_metadata_test.go | 
210 - .../golang_protobuf_extensions/.travis.yml | 8 - .../Makefile.TRAVIS | 15 - .../golang_protobuf_extensions/README.md | 20 - .../golang_protobuf_extensions/ext/moved.go | 2 - .../pbtest/deleted.go | 2 - .../pbutil/all_test.go | 178 - .../pbutil/decode_test.go | 99 - .../pbutil/encode_test.go | 67 - .../testdata/README.THIRD_PARTY | 4 - .../testdata/test.pb.go | 4029 - .../testdata/test.proto | 540 - vendor/github.com/pierrec/lz4/.gitignore | 3 +- vendor/github.com/pierrec/lz4/.travis.yml | 4 +- vendor/github.com/pierrec/lz4/README.md | 96 +- vendor/github.com/pierrec/lz4/bench_test.go | 119 - vendor/github.com/pierrec/lz4/block.go | 314 +- vendor/github.com/pierrec/lz4/block_test.go | 98 - vendor/github.com/pierrec/lz4/decode_amd64.go | 8 + vendor/github.com/pierrec/lz4/decode_amd64.s | 375 + vendor/github.com/pierrec/lz4/decode_other.go | 98 + vendor/github.com/pierrec/lz4/errors.go | 30 + vendor/github.com/pierrec/lz4/export_test.go | 13 - ...1572067d493db8dc8161f05c339a5192b0b4087-22 | Bin 105 -> 0 bytes ...02766f768fbfbd81b752cce427eb5242a44929cc-5 | Bin 19 -> 0 bytes ...32f04032e12567057782672bb12670c20d38439-10 | Bin 23 -> 0 bytes ...367b985641aca66e6e4eeea68acf5e2a02c62a8-16 | Bin 38 -> 0 bytes ...03e85abc49352b2f7cc83efd7e4274da02d78b84-6 | Bin 13 -> 0 bytes ...049f82a81bb6b4d7cf69fac5e413f6ce299d48cf-8 | Bin 19 -> 0 bytes ...04c05c7956f17e57a91a47909bd0706135cf17a6-1 | Bin 46 -> 0 bytes ...50e2af2a57d8044139ba21375f0ac6fcb7ab0b1-12 | Bin 84 -> 0 bytes ...519d86e62cc577b98e9a4836b071ba1692c7674-30 | Bin 88 -> 0 bytes ...0547c73efb9b6a345fd9a52aa0798b48dd9aca62-2 | Bin 33 -> 0 bytes ...5aae2cf8756f66066cf623618042ebaa92ec745-14 | Bin 73 -> 0 bytes ...608f9eba5e6fd4d70241a81a6950ca51d78eb64-33 | Bin 88 -> 0 bytes ...07fe3e792f0d2862dccc04db22c0e4aef4d41b49-6 | Bin 12 -> 0 bytes ...990ac54decbca1a97893e83c7feb2be89cb10ea-14 | Bin 68 -> 0 bytes ...9f2eda28ecc97304659afded4d13a188baf2107-22 | Bin 51 -> 0 bytes ...a4ff2ab3a01888686c5bc358b72be108bbb4721-16 | 
Bin 20 -> 0 bytes ...a7fddf3c8aa1c781223748129c9dc0807de3a6b-28 | Bin 51 -> 0 bytes ...0b5bec228930b2cfcda3be9a39107a6bc8044f1e-3 | Bin 16 -> 0 bytes ...ca5fd3841a6777873c7ef26f65a384e7b15d065-18 | Bin 61 -> 0 bytes ...0ce9c3bac93df0ea1f6343d223d5220f9eb2383a-8 | Bin 22 -> 0 bytes ...cf885cd35e7124005b0ba0c3c4431ddfaeff84d-11 | Bin 48 -> 0 bytes ...0d7c02d4e91d82b0355baaca1237062639442db6-3 | 1 - ...0e1b2b0c49dfb86fe01d3453dd24e39482e132e8-7 | Bin 21 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/1.bz2 | Bin 42 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/10.bz2 | Bin 14 -> 0 bytes ...06b9d718c97bb7c872847d3070a570e99d9fa3e-22 | Bin 82 -> 0 bytes ...0fa5d9f0fe75f73c0e92a1fe1c00f0041ec8f39-24 | Bin 51 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/11.bz2 | Bin 14 -> 0 bytes ...113a12cbb28b83fcee714d58c35bbf52c0740e90-7 | Bin 21 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/12.bz2 | Bin 14 -> 0 bytes ...288161f8ce422490f63f257ce7338ef90fb8827-15 | Bin 70 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/13.bz2 | Bin 14 -> 0 bytes ...36f7224ae337a61df2e72b80af8b1aaa5933af3-10 | Bin 21 -> 0 bytes ...3c3c26f7a34d01fc89c92ca8ba2ba5ae430c225-16 | Bin 38 -> 0 bytes ...3db64707d1ea3070b4a37b6c1291d6125acbbd3-10 | Bin 22 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/14.bz2 | Bin 14 -> 0 bytes ...4193748a7b6cda204b11d042a35635151e90dbb-20 | Bin 65 -> 0 bytes ...42d4f8cb427dd3562d72d889dfc0ea3a2b03d98-22 | Bin 83 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/15.bz2 | Bin 14 -> 0 bytes ...5663b854e9a4f193502ea6463dae38b4d8fca90-19 | Bin 71 -> 0 bytes ...15e223354eb5378a7ee74a41dfab28ffc895ca33-1 | Bin 55 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/16.bz2 | Bin 14 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/17.bz2 | Bin 14 -> 0 bytes ...177c1c68fead4507aa47dd2455fd17a10ceda5ea-1 | Bin 31 -> 0 bytes ...7871030a73ac4d12ada652948135cb4639d679c-34 | Bin 42 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/18.bz2 | Bin 14 -> 0 bytes 
...80a2772b126d31abcb3ef692a14b13cf47f103e-17 | Bin 38 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/19.bz2 | Bin 48 -> 0 bytes ...91e0dd24b8c7f8babeae4839768df39acc17eb1-17 | Bin 32 -> 0 bytes ...971e6ed6c6f6069fc2a9ed3038101e89bbcc381-26 | Bin 68 -> 0 bytes ...1a582381781f264f551bd6f0f2284a931147e6d9-4 | Bin 9 -> 0 bytes ...a58f02dc83ac8315a85babdea6d757cbff2bb03-30 | Bin 66 -> 0 bytes ...a5a08b67764facaad851b9f1cbc5cfb31b7fb56-29 | Bin 118 -> 0 bytes ...c2781a1ffae4059ce3e93a55ec8d8cbf8bdecdf-22 | Bin 105 -> 0 bytes ...c944d5065b1a2b30e412604a14aa52565a5765b-35 | Bin 44 -> 0 bytes ...1d37fb332301cf7de0bd51a8c1aa9be4935e89fc-1 | 1 - ...1d6b87b52e62cb84be834478ad88129f5e1f247b-9 | Bin 32 -> 0 bytes ...1ec2f11a8d8b9cf188a58f673a0b4a8608a926ca-3 | 1 - ...fc2ba0bb981fec47badea1c80219452c9e3c76c-22 | Bin 88 -> 0 bytes ...1fd8444ac43541c44a1c6ed8df2f688b1fa09681-1 | Bin 29 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/2.bz2 | Bin 42 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/20.bz2 | Bin 48 -> 0 bytes ...02a9c8b188cae90f29bce3bf0438a035c504eb4-20 | Bin 62 -> 0 bytes ...065ba3177c7dc5047742faa7158b3faeaac1f3c-32 | Bin 88 -> 0 bytes ...0cf0057443ecb322ff1169ecbe6cf20250f15af-13 | Bin 32 -> 0 bytes ...0d1a26afe563ad77e7a95fbee6ff59ebf3e61ab-13 | Bin 46 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/21.bz2 | Bin 48 -> 0 bytes ...1c8be1bb9eeea5b141500dee4987ab7fbd40d4a-23 | Bin 20 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/22.bz2 | Bin 48 -> 0 bytes ...2201e32d052c15874f0323a09c330f3666029a72-1 | Bin 1473 -> 0 bytes ...26780b32ba8f87ec614fdb376aa0884011c4ca9-17 | Bin 32 -> 0 bytes ...2897c61698649d7570de91613afdc19b66e6965-20 | Bin 71 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/23.bz2 | Bin 48 -> 0 bytes ...34cc427d9be32470f3c2e11a6bc16567f558e55-22 | Bin 37 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/24.bz2 | Bin 48 -> 0 bytes ...2486a84bf0f161f45b050d9c19ea9e35f5def864-8 | Bin 25 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/25.bz2 
| Bin 48 -> 0 bytes ...5252b16cd4afa8ef86122448688c7095684c86b-12 | Bin 25 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/26.bz2 | Bin 48 -> 0 bytes ...63fb3d738b862ec4050e5a9fbabfbd99cb0d9a5-16 | Bin 32 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/27.bz2 | Bin 48 -> 0 bytes ...76580343a14eec04143e89a778dae3e14df472c-17 | Bin 52 -> 0 bytes ...27fb5dc4016dc640e55a60719a222c38c604fa6b-2 | Bin 14 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/28.bz2 | Bin 157 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/29.bz2 | Bin 157 -> 0 bytes ...2a08d7c56ff9959698688f19ddd2e1e4d4651270-3 | 1 - ...a33d8514fb512aa20b0a56800cd3e12f3952b6b-26 | Bin 51 -> 0 bytes ...2a52400dd3aa2d2a40657d1e51c47c1929912927-3 | Bin 18 -> 0 bytes ...ab005ac79cd4dada693dd2a747c001898d45e1e-16 | Bin 43 -> 0 bytes ...2b39aa66ecfac58e61185c9664a968233931496a-9 | 1 - ...c2a5947341d76797a7e2299f39d01e3aebb2eb8-19 | Bin 70 -> 0 bytes ...cc2308b75a2e8f7eafcf69370767e5fce314892-13 | Bin 32 -> 0 bytes ...cdafdadb156e2759c389b6b8edf6a402034886c-26 | Bin 51 -> 0 bytes ...2d7f0171116eec9984eaa9138e1312e90a7d67ee-1 | Bin 47 -> 0 bytes ...de93224b5f0db491ced1ec491a9f41d71820671-11 | Bin 23 -> 0 bytes ...2e8487cf61feda70c0d74f12bfb5b692b684f82a-9 | Bin 25 -> 0 bytes ...2f0ee9cf4bb951a37efc6460d5709442bc3de54e-6 | Bin 23 -> 0 bytes ...f1ba7fe1cd90a4023706a2ea9c7c9dca8128119-30 | Bin 167 -> 0 bytes ...fad20024167a500cdb8df5334a614f113efae00-20 | Bin 76 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/3.bz2 | Bin 42 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/30.bz2 | Bin 157 -> 0 bytes ...00579a548d96d64c9da8470efa15e787f1a36f1-28 | Bin 51 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/31.bz2 | Bin 157 -> 0 bytes ...1c6c22708d346ed9e936fa7e77c8d9ab6da8d1e-33 | Bin 88 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/32.bz2 | Bin 157 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/33.bz2 | Bin 157 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/34.bz2 | Bin 157 -> 0 bytes 
...44d38ec2ec90cb617e809439938b4cbf3b11f02-10 | Bin 40 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/35.bz2 | Bin 157 -> 0 bytes ...52631eab692c4a2c378b231fb3407ebcc0c3039-33 | Bin 88 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/36.bz2 | Bin 157 -> 0 bytes ...363d4559cac10516289fe1b6029590c4c7a6d8eb-5 | Bin 12 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/37.bz2 | Bin 58 -> 0 bytes ...771c6e8ea0f20350dae0180a9b14e36b8aef244-22 | Bin 71 -> 0 bytes ...37ee7fab504f2d2039753d73dd0290c884bd57bf-8 | Bin 25 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/38.bz2 | Bin 58 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/39.bz2 | Bin 58 -> 0 bytes ...96101a712463bb336a18f4096fc3eb5923600c1-10 | Bin 74 -> 0 bytes ...96146e06d3a4b2468d080f89ab5862348073424-28 | Bin 118 -> 0 bytes ...97127b75cb59b253ed49206082b0428b6b23d02-17 | Bin 43 -> 0 bytes ...9ccf446395ef707cf92a04b5508deda399372c2-15 | Bin 32 -> 0 bytes ...b6fd6da48bb34284390a75e22940e7234dbbd28-34 | Bin 24 -> 0 bytes ...3de3c5c394a3cf05620bb80871a1f10e9e36f25b-8 | Bin 25 -> 0 bytes ...3dee65f1cf51dfe2e5be498150ce22d2ac5a07fd-1 | Bin 22 -> 0 bytes ...3e34341fb51769fd9d948bdd20c011e335b145f4-1 | Bin 64 -> 0 bytes ...3ee211efb3d5d8058cd9a8c59e40c8d0f7a3df53-1 | Bin 51 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/4.bz2 | Bin 42 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/40.bz2 | Bin 58 -> 0 bytes ...05726718b3f54a0cfae1666f06d3cc1ee747104-14 | Bin 27 -> 0 bytes ...07188676d45d6f9dd5f3c84e7df0e763c7cca57-22 | Bin 82 -> 0 bytes ...08ac1a4a83e082e848c208eed903930d81e81b6-17 | Bin 67 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/41.bz2 | Bin 58 -> 0 bytes ...114fd99aaa4dc95365dc4bbcb3c9a8a03434a5a-29 | Bin 55 -> 0 bytes ...131f155339a3476898088b065b8588a2b28278e-26 | Bin 88 -> 0 bytes ...413e39442f005279560ddad02bbdd1a05c9f0eaf-4 | 1 - ...41b7eaf8892043eccf381ccbc46ab024eb9c503c-4 | Bin 9 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/42.bz2 | Bin 58 -> 0 bytes 
...208b7fe7ac3a530c159a1c8fd09dd3078b5650f-15 | Bin 35 -> 0 bytes ...421bd1daa317c5d67fa21879de29d062c342294b-5 | Bin 19 -> 0 bytes ...2544ff3318fe86dd466e9a05068e752a1057fcc-32 | Bin 123 -> 0 bytes ...2b056f9dac9cc658c80092e490b3dbcd436e3f8-15 | Bin 32 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/43.bz2 | Bin 58 -> 0 bytes ...432c09281c46537c98864bc7d601780562b68410-1 | Bin 22 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/44.bz2 | Bin 58 -> 0 bytes ...46dc91ff0ddc34c3b02f741e3f6f079a4dfcae8-17 | Bin 81 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/45.bz2 | Bin 58 -> 0 bytes ...51831159c1afb87077066147630b4b6caeb54c3-11 | Bin 25 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/46.bz2 | Bin 82 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/47.bz2 | Bin 82 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/48.bz2 | Bin 82 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/49.bz2 | Bin 82 -> 0 bytes ...49861b3d9bca3e2857d806aaecaac09af4bff1dd-2 | Bin 35 -> 0 bytes ...9a3ead0ad96e8da5a4c8f89bd140e1d8af8995a-17 | Bin 128 -> 0 bytes ...a14a3883f5c8819405319e8fb96234f5746a0ef-22 | Bin 48 -> 0 bytes ...a625a4b4f3069707e88f16db88e993dabc41aa2-27 | Bin 51 -> 0 bytes ...a6464c2aba2492f5122856de7ac451994eadda4-10 | Bin 20 -> 0 bytes ...b0ab2fc1fdfc56066c5c1f2751b292f4ddc557e-16 | Bin 71 -> 0 bytes ...4b55f37e6637f4246a41caa490da4bec632379d4-7 | Bin 21 -> 0 bytes ...4bb422b835278e4aca92d076331d9c8cc5752345-1 | Bin 47 -> 0 bytes ...bd00d26b893ce064dad6e771f30541b541d43b9-18 | Bin 114 -> 0 bytes ...cde5adc216a29fff2ec39e23ccc6fca80cd4a15-21 | Bin 83 -> 0 bytes ...d1b64babe1f045b8374f4d74949622591546eb5-17 | Bin 56 -> 0 bytes ...d49686993529cfe29473c50b9b0fb2b6ea4f6bf-13 | Bin 25 -> 0 bytes ...4ea726d6736026a733707e695d9c2cdc83efc05b-5 | Bin 32 -> 0 bytes ...ef3e6d20ccec24376a526ab9ec9f6f2cc604129-25 | Bin 147 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/5.bz2 | Bin 42 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/50.bz2 | Bin 82 -> 0 bytes 
...0a87eb0c097a7ebf7f1bf3be2c6a7dbe6b6c5c3-23 | Bin 105 -> 0 bytes ...0e3ac1126c605158726db6f2cca3120f99b8e73-22 | Bin 105 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/51.bz2 | Bin 82 -> 0 bytes ...1075c34f23d161fb97edcf6f1b73ee6005009a0-28 | Bin 61 -> 0 bytes ...12ed5fb4e92818b75bd7633f58d6ca5340ffd94-27 | Bin 195 -> 0 bytes ...514a62216c761adf23d946f11c0d1a0410990641-3 | Bin 15 -> 0 bytes ...16d84c21ac984bd1cae56910d71b62e39610c5d-29 | Bin 51 -> 0 bytes ...17d39f406222f0a0121b7a1961953204674c251-33 | Bin 88 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/52.bz2 | Bin 82 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/53.bz2 | Bin 82 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/54.bz2 | Bin 82 -> 0 bytes ...5431cabbc58d8dc143ece079de40300c1ce6e101-1 | Bin 144 -> 0 bytes ...5700385089e16e44968ea410c6b90206b16d72a-14 | Bin 20 -> 0 bytes ...55b9a902445e2bfa2f0f37d630779d329eeda20e-1 | Bin 33 -> 0 bytes ...620a492eaf067734e5b8b64517b28ec3beaa97e-12 | Bin 46 -> 0 bytes ...5699fea659964d8ab94069d08b0b97834c0a42df-2 | 1 - ...765fc21629571e51adf2fc2bc8b64541a1ea08d-18 | Bin 81 -> 0 bytes ...768ea5d1911143f4b1c0585b9b864ebe16aa004-12 | Bin 33 -> 0 bytes ...7b780437f4abf2d5cba0775bf802a4dfdb067d6-25 | Bin 195 -> 0 bytes ...8f53d40265c9a49c0d3b4292cb637464a4e376a-17 | Bin 35 -> 0 bytes ...59b254c3565c9eed2bc93385b821da897afcbb15-1 | Bin 44 -> 0 bytes ...a962e3d6a128983afe9ea78a28cce0f40a790c0-14 | Bin 42 -> 0 bytes ...af52ef91b6f717ffdd805585e24806407e9621b-14 | Bin 114 -> 0 bytes ...5b01aeb030dc1dc9568fd32f1647d92f0692a411-6 | 1 - ...bbd27cea704a4e6ff3f42f4792a91eb7839bc0d-12 | Bin 28 -> 0 bytes ...5bd895c23369df9505dd99ffcd035dc5e897264b-1 | Bin 49 -> 0 bytes ...5bfd84d7b2ba6b6325d5135fb0a9ae1ec5d7d3e1-2 | Bin 48 -> 0 bytes ...5c4f347c3567baf700dfccf49a91192c83b89da2-8 | 1 - ...5dd8001f8a87c24f866074c36b6b80f42b298ff0-1 | Bin 38 -> 0 bytes ...ddf63d61aa38da1d409e37b301e0fe5a207a051-27 | Bin 156 -> 0 bytes ...e19e298d051aac48b7683dc24577b46268b630c-35 | Bin 24 
-> 0 bytes ...e54c67050ee8583c7453ff13d6eec15b2255288-20 | Bin 83 -> 0 bytes ...f946423d1138924933334c6e5d3eb13e1020e9c-33 | Bin 33 -> 0 bytes ...5fbebd9edd144c4b9869ed4ab40c7cc3c46a4a8f-4 | 1 - .../github.com/pierrec/lz4/fuzz/corpus/6.bz2 | Bin 42 -> 0 bytes ...046b14dd1f6925bcfe470a8484353f525db6a9c-19 | Bin 57 -> 0 bytes ...608a9993a51ec7bf252ac76b163def5f7002d2e4-4 | 1 - ...610d8dc3cf4012e4e2d070988b0720285a4c361e-7 | Bin 21 -> 0 bytes ...1b196987682fb64ef9c4ff37532bf9b2ac201bc-14 | Bin 30 -> 0 bytes ...26f8b6efa3ea0f254789fe6cf52f6e52538f357-25 | Bin 219 -> 0 bytes ...6277f2e0a6df2ac61660ee1965c690b87c26b556-7 | Bin 21 -> 0 bytes ...2c738f00c488f493989b2037d9cf1781f0bbd40-11 | Bin 38 -> 0 bytes ...631ffa88df9713a124b3ba6c704c0c75727af2ff-6 | Bin 16 -> 0 bytes ...33df0cd78621cd45067a58d23c6ed67bb1b60cb-31 | Bin 88 -> 0 bytes ...35d5de257a1910a7fd0db2e567edfa348e47270-11 | Bin 38 -> 0 bytes ...64c500b5addcbf8c673188a1477e4159851ae04f-1 | Bin 124 -> 0 bytes ...60387064a3cf4cb81046989929abe1b4fbfc815-17 | Bin 51 -> 0 bytes ...66068a7e7bdfd1038a84aeb3dec6e3cb4d17ad57-2 | Bin 16 -> 0 bytes ...66c34847568ac9cb3ccbb8be26f494988a3e0628-7 | Bin 23 -> 0 bytes ...7534dbd68040fb9a8867e6af384d33ea323758b-29 | Bin 67 -> 0 bytes ...7ab3037ff49f082a877224d68e35069cc4d45eb-16 | Bin 32 -> 0 bytes ...8612136c2017f9caf87122155f82a25f57c2d2a-32 | Bin 36 -> 0 bytes ...981397d97c481e39d563d43916377fb3c74c60e-28 | Bin 88 -> 0 bytes ...9c2accb74456005e2a9bbef15ccad3d076f2124-28 | Bin 112 -> 0 bytes ...69dcc80940a26844b0afe7898fea9cf68b698214-4 | Bin 12 -> 0 bytes ...9fcd886042d5c3ebe89afd561782ac25619e35b-27 | Bin 88 -> 0 bytes ...a04b54e1511633ec895326b4e043e186fa5693b-29 | Bin 171 -> 0 bytes ...6a3e8935204dcd3dc48a1ff7415c305f0e5863aa-9 | Bin 28 -> 0 bytes ...b351674a45f2d9be602fe8d3fb84229551b4ce3-16 | Bin 69 -> 0 bytes ...b72fdd9989971ecc3b50c34ee420f56a03e1026-27 | Bin 112 -> 0 bytes ...b7f4ac7aa8b357dee3067d7a60143c03b54bb8d-16 | Bin 42 -> 0 bytes 
...6bc138796e9b80572a6cb1b4a7ba30c97c22359d-1 | Bin 46636 -> 0 bytes ...6e14a407faae939957b80e641a836735bbdcad5a-2 | 1 - ...6f24be0bcac848e4e5b4b85bc60f70f12388a5ed-4 | 1 - .../github.com/pierrec/lz4/fuzz/corpus/7.bz2 | Bin 42 -> 0 bytes ...102c7f297296821114661e00e5bf54d0891d105-21 | Bin 76 -> 0 bytes ...151692dfebfc82876676e65ee9b807d83a3df54-22 | Bin 92 -> 0 bytes ...1a24ce771fb7f1a4163e57a478c3044ad42e62d-24 | Bin 51 -> 0 bytes ...2c738d7492d3055c6fe7391198422984b9e4702-32 | Bin 88 -> 0 bytes ...2f032947602f1be74f01c91165c5118121f36c7-24 | Bin 51 -> 0 bytes ...3b6bd1462a0521b4bf76abb1fd80df6e180dc80-17 | Bin 62 -> 0 bytes ...3c81fef0997a4929b303e02a99f3977870f2013-29 | Bin 159 -> 0 bytes ...3efed803abadf6167fc3f04e0674cc39c30f6af-21 | Bin 37 -> 0 bytes ...603f5f266de813608c4cc1ccd1c798ef8065c5c-23 | Bin 51 -> 0 bytes ...64571571e4d46f4397ed534d0160718ce578da4-26 | Bin 88 -> 0 bytes ...67d1943125a0f6e9397779cc757c9cdd1e05631-17 | Bin 62 -> 0 bytes ...6d22068e2ed4a5952d4adc7ea8dada5509a784c-13 | Bin 30 -> 0 bytes ...740102922cb9933980bb800c1115daf38edf654-24 | Bin 72 -> 0 bytes ...83270b1e353ba3895b7d0c4135b8592e22f6508-12 | Bin 56 -> 0 bytes ...7851a406571c6b4c1aeed0af16db8c48444c3f2b-1 | Bin 34 -> 0 bytes ...78981d313038119ac4f7017349e50a1cba56b382-7 | Bin 23 -> 0 bytes ...8c88c4afaf5962056b1aea720509b9f6f286b91-15 | Bin 64 -> 0 bytes ...8e59daada9b9be755d1b508dd392fa9fc6fa9c2-27 | Bin 156 -> 0 bytes ...8ef686662a059f053f80c1c63c2921deff073fb-31 | Bin 55 -> 0 bytes ...9c5ac978f5aee35e123f523369aa46b1d0a995d-11 | Bin 26 -> 0 bytes ...a0fc8dacceae32a59589711dce63800085c22c7-23 | Bin 88 -> 0 bytes ...adf4aa021efaa953268c817467959fa3c42ca42-13 | Bin 25 -> 0 bytes ...7b8c99ded96973a6e8f523bc1c6ed4ef5c515aa1-1 | 1 - ...b919213d591e6ce4355c635dc1ecc0d8e78befe-30 | Bin 66 -> 0 bytes ...7ba80199cbce9a2eb47da15f0c62fd1fb8fa67d9-3 | 1 - ...cdc0917ad63ce7a7c98301a366c31635f0f099d-14 | Bin 32 -> 0 bytes ...7ce37ad19bfe9f52eeadda03e6b8448e5bf57800-3 | Bin 1452 -> 0 bytes 
...7e3132012be223fd55e5e7a7fc2ea602361ed2b4-5 | Bin 8 -> 0 bytes ...e9a88118e4c41e61f5c501e6edf9a5bd2432be3-23 | Bin 135 -> 0 bytes ...7f081c89cfb6344f4aac5f813da1fd15f8bab022-1 | Bin 26 -> 0 bytes ...f8c3b163798c8d5e1b65e03f411b56b6c9384bb-28 | Bin 124 -> 0 bytes ...f970f16026c689c096a19fef1a3282a13ee69dc-20 | Bin 65 -> 0 bytes ...7fa96d28faf45062eb803ea84a334b607e966f90-1 | Bin 31 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/8.bz2 | Bin 42 -> 0 bytes ...261f0c1799ca71c411f6d3f34069b25dac8b739-18 | Bin 52 -> 0 bytes ...2a499521f34b6a9aff3b71d5f8bfd358933a4b2-36 | Bin 44 -> 0 bytes ...82afa534de59025bf1e3358919286525ae7d3347-2 | 1 - ...2c627991d65c5c4e88c9ccac39be082cca40765-24 | Bin 82 -> 0 bytes ...435aa58e67c4de798375b44c11bffa5b680f615-32 | Bin 88 -> 0 bytes ...496965f7aa6cea3e080dbfb911a7034e6623cb7-10 | Bin 23 -> 0 bytes ...84a9bda8369d33ffe0d6f520c24331ae64e9dc88-3 | Bin 10 -> 0 bytes ...86513e3435adaf7c493dd50eb5de372010185e36-1 | 1 - ...6637b211f4fa0118ccab9ee193c66286126bb5d-20 | Bin 35 -> 0 bytes ...695984335fa005895377a8a60000a921d7efd99-10 | Bin 25 -> 0 bytes ...6baa53eb98a9a342b0d5b79dfa5c58aa9c1b05e-16 | Bin 35 -> 0 bytes ...7caf7737ebb025ec2d908224818ceb2bc76b658-28 | Bin 156 -> 0 bytes ...8e6e46ab1ec92ce694b8d4c3d816491169d2bb6-10 | Bin 25 -> 0 bytes ...9216c662a46d50f37cfa08963acad8c6f7aace7-11 | Bin 20 -> 0 bytes ...8d70b7de160bbef22ab46f798d687a69dbda772c-5 | 1 - ...8e533f8a1e58710d99d6b7d39af7034961aa4fbe-5 | 1 - ...f0d2862c49eebbcd473a38c8fa1e76288f47127-26 | Bin 51 -> 0 bytes ...f4788d30edd22ebcfef0e52bbf9e8c3d1e8d7e9-27 | Bin 105 -> 0 bytes ...f61ea021e02cc609baafbdf714b9577e4bcb05f-16 | Bin 74 -> 0 bytes ...8f7a47710904981ffaa1fefa21fa95fd2d818487-7 | Bin 23 -> 0 bytes .../github.com/pierrec/lz4/fuzz/corpus/9.bz2 | Bin 42 -> 0 bytes ...0a227d3beab730ed6eecd63657f5406beccabdf-12 | Bin 39 -> 0 bytes ...2197169aded0d5d0407e3925959e922257a101d-28 | Bin 195 -> 0 bytes ...24e17974cd194fa756d23394676d37cc3641f64-17 | Bin 25 -> 0 bytes 
...92a785b5ea93d36e27029e281e9a34377d81ce55-5 | 1 - ...2d41e4fca52311e848fac274144b6293d9260f7-34 | Bin 65 -> 0 bytes ...92fda3aa2adbe37ff690c59939ca1e1b2a8a7936-1 | Bin 40 -> 0 bytes ...363b81db6b35e8beebcc32d560f786472829bd8-21 | Bin 88 -> 0 bytes ...48b1ce043c82d0cfbaa910b6989a1b35a19b8ae-16 | Bin 32 -> 0 bytes ...505b43fcbc3139441e35bdaaec138e28af076f6-25 | Bin 91 -> 0 bytes ...51bb02c199adb52e9e300e9fc070bf55980b910-14 | Bin 25 -> 0 bytes ...55404fe3f375361f5c3be1dbcd28eb9a28f06e4-13 | Bin 23 -> 0 bytes ...55c823909722e2693dd7cea3eadc17833dddf86-24 | Bin 88 -> 0 bytes ...5ca8da5556065f33b46c2c8186c2f1cebb1b5da-29 | Bin 112 -> 0 bytes ...67e50c6c1bc99aa5e7fa07c2de14564f52b0fd3-20 | Bin 28 -> 0 bytes ...6c9a1fa8b0184ad486f8f68a9ddc88434579080-30 | Bin 159 -> 0 bytes ...6cc45abef3bc9fb6659714b9743cda92ec0abb9-16 | Bin 38 -> 0 bytes ...719ea029fdf8c837f991ac3548145485cc1f06e-13 | Bin 73 -> 0 bytes ...84480af27d1640fd02f40e736ffcde3a91e4abb-22 | Bin 88 -> 0 bytes ...8d40a50ee58c05727777e242ecbc0d4e214f7fe-21 | Bin 35 -> 0 bytes ...915e9bb007bc2c1f3d346123933923279f0dec1-27 | Bin 51 -> 0 bytes ...992413e17d64968cb04af34c7761182f20fc97b6-2 | Bin 30 -> 0 bytes ...95d50f1cb750cbf038246d6cb0cf8db11d7e60e-33 | Bin 88 -> 0 bytes ...9cfa74a1fea5d16168dd9efc720425b85e95eb7-15 | Bin 32 -> 0 bytes ...a552bab72f174ede3b9bdb7a663c963fd1463d3-16 | Bin 32 -> 0 bytes ...a5ab6c72a445b3b27129004d2a1a417cd4d8440-26 | Bin 88 -> 0 bytes ...9aa3050cb38a6ad276cb5e5ca0c4776d92cb7b0f-1 | 1 - ...9be44693435bc6c51980f30418bcc690d8c25fe7-6 | 1 - ...c0420bf00f888487d543f42fc48b407c65d4717-17 | Bin 95 -> 0 bytes ...ca2a086f1f08c7dec54d52425bd72f17c11056e-21 | Bin 37 -> 0 bytes ...db70b1edad2317d94dcaafe7f5c5e3145084167-12 | Bin 37 -> 0 bytes ...e160ae007fc11092a3fd877ebe706c4d841db49-19 | Bin 32 -> 0 bytes .../fuzz/corpus/Mark.Twain-Tom.Sawyer.txt.bz2 | Bin 124744 -> 0 bytes ...01e13c3e401957031defb62b05434c65b01d5c4-10 | Bin 31 -> 0 bytes ...059044bdb0402471dbe9aaaa555a063a6bc1e6a-16 | Bin 38 
-> 0 bytes ...06b1a08fcda463f1d51c485b0e7271ff9048b41-16 | Bin 35 -> 0 bytes ...0f3d67e96968a267366be380147cbc7b17e5b2b-16 | Bin 68 -> 0 bytes ...18d849dc2a98c4ebb6000b2cc853f21fb64d9e5-24 | Bin 195 -> 0 bytes ...2e5916be780e35e9ecb7c42be52dd5e134f3363-25 | Bin 195 -> 0 bytes ...33252a74974fc86df30c311d501a1f363d350cd-12 | Bin 33 -> 0 bytes ...462f03ee666a20244d3331e3635b7eb796d906d-15 | Bin 60 -> 0 bytes ...a56e983782e49f8267a61d4375e98b1a862862ac-9 | Bin 17 -> 0 bytes ...58a9f9caca5e73b4296b931201a5ea870974c26-15 | Bin 116 -> 0 bytes ...a628194a08ff63e98625b1786175026c5f02c716-5 | Bin 38 -> 0 bytes ...64f2336fd4a9ec8153b95f40c383e1ecfed9e73-25 | Bin 51 -> 0 bytes ...a6a5682a6663e0c548c9e5acbad4958e2c256b32-7 | Bin 60 -> 0 bytes ...6dbaac639f3b82609ec27c80fbd003684c28867-21 | Bin 61 -> 0 bytes ...8c6a4509b61d8baa71f59f9e1eb95712b10626c-23 | Bin 51 -> 0 bytes ...a97d9bf241e8ec73f99205b32c24fcd64194f0b9-8 | Bin 19 -> 0 bytes ...9e348d9896cc740f7e910d0a70c080adb65cc77-13 | Bin 34 -> 0 bytes ...a04575587509ffc65a6b0224d24ad1125cb0f63-26 | Bin 91 -> 0 bytes ...a290b4dcc8198945311c8149fc1252f14555e70-15 | Bin 63 -> 0 bytes ...abb8fa4913c79f0a42494ad2215a32927adbd45-16 | Bin 63 -> 0 bytes ...c7077c5220abe6cd481318c42dfe6cb2cb2c666-10 | Bin 40 -> 0 bytes ...cbef0322169a93c7421902883cc8057675c953b-26 | Bin 195 -> 0 bytes ...ec95871bc7d87cae16c36a0d30955b43076aec5-17 | Bin 35 -> 0 bytes ...20e3f27f4e8d41f16124881f92546f0fb2edc16-13 | Bin 33 -> 0 bytes ...27fb21ecbe6e77c91341738621ad7092c29bca5-17 | Bin 89 -> 0 bytes ...38ce47b707326024fb24860c4365d58ab9f3528-29 | Bin 166 -> 0 bytes ...b3eaea244bd47b64c8de3d81c7b5e94e421d7f32-5 | Bin 9 -> 0 bytes ...3fd355dc090a732d5cf3b25151f165ea901a682-24 | Bin 51 -> 0 bytes ...53101ec4348e9c329c13e22790ffde246743030-35 | Bin 71 -> 0 bytes ...58429fd1107617191026029cf327b2ebed963bb-18 | Bin 6 -> 0 bytes ...58846d79a8dc960a718ef88dd3a06ad49b1fe72-16 | Bin 35 -> 0 bytes ...b5b5b895b4619fa039ea99520b9947de2996c38f-6 | Bin 16 -> 0 bytes 
...b6aca5c55295d93491e47817f46ca372c9078cec-3 | 1 - ...b6ddb90092b3087158dc32669529db2012f14c3c-7 | Bin 20 -> 0 bytes ...b6e7a519d013ddb67313af02a9ce966877949487-4 | Bin 24 -> 0 bytes ...b71a5a7c576e5cc5ba23845d352b2af16737c03c-7 | Bin 17 -> 0 bytes ...7815c3b5649d9a367ba99e7e09cf1f251ab6f83-18 | Bin 44 -> 0 bytes ...7a5b15c9e2d4d659d421de8e3b463200f71f1ec-23 | Bin 143 -> 0 bytes ...83b3d04ada1403578065d7f10aa7441830dea3c-11 | Bin 20 -> 0 bytes ...b92c70d3f12e67c69ba5db9ad491b7a4e075ece8-7 | Bin 23 -> 0 bytes ...94b7ebc6d153e0c99a97864f58b26f7192f66a5-20 | Bin 35 -> 0 bytes ...ba98469ede70309f18893f0ff95380f5a0486fcd-6 | Bin 52 -> 0 bytes ...c0c31f304c1a1f8be0c8a0d9daa3b8aa1f23799-14 | Bin 41 -> 0 bytes ...c3ac4aae07cba8d7f657a8739d1774e44bde613-31 | Bin 116 -> 0 bytes ...c650b6a5356c1935f64f6fb755e43bc5f5187c4-26 | Bin 195 -> 0 bytes ...dc123d9da19a7ae0ff87ca0741002fbd8bb2cca-34 | Bin 41 -> 0 bytes ...be06bb3c3b604660fd36b2af8860d35e31c8bbf3-8 | Bin 66 -> 0 bytes ...e5767f4d79c5a0b2643d8eddb74eca0598674dc-19 | Bin 75 -> 0 bytes ...07f4e4cb1d0a34dc6899097fd27ee9f1744cb70-12 | Bin 19 -> 0 bytes ...1972d0c898848e6188b69bcdbb7d14fcc780ee5-26 | Bin 88 -> 0 bytes ...2ac55a7fb702dd9a527b576d99008fe9b4f376f-14 | Bin 36 -> 0 bytes ...2c3d29bce8aae89fed326832b3e1e1077cef1da-18 | Bin 61 -> 0 bytes ...c321670bbcd985327045dd1468bf2ac4ae7333e5-7 | Bin 32 -> 0 bytes ...34998d9a8893eca9cdeafe7b2482469ad98192b-25 | Bin 51 -> 0 bytes ...42ae63ab9584753959f4692cef9fd8513b54691-30 | Bin 88 -> 0 bytes ...5522d11f314fc46de58e15116b6910d52acf866-17 | Bin 69 -> 0 bytes ...c652c46aba3567521f912bae6dc263b668c34c9c-7 | Bin 17 -> 0 bytes ...c6610b87900912d462229a5259dab51ea0aeef33-4 | 1 - ...c6c37f6c89fe55768f8b3f7b28b99467c239703a-1 | Bin 39 -> 0 bytes ...c71abfffdcf530a6d28fd99cd2c3505c61ef0ac5-8 | Bin 26 -> 0 bytes ...77304b250e887b39b5447d19b9c106fcebe7e66-20 | Bin 71 -> 0 bytes ...78cd8530e6d8a606a28797552ce3f5494763621-25 | Bin 195 -> 0 bytes 
...790308a65efa1b895bc57abe53e4fbcdb2b7d0e-13 | Bin 88 -> 0 bytes ...c7fe1fe2e3fc19fab3766f9fdb1d22c848d49aed-2 | Bin 53 -> 0 bytes ...c8b01a7ea9c1b84e4ee5eb68121c64f183e7ea10-9 | Bin 34 -> 0 bytes ...a5d375d8a66727221d3e198d4ad360782944de7-27 | Bin 195 -> 0 bytes ...b1314cc880a1a389cedf5c16cc4b8ad505b4506-23 | Bin 105 -> 0 bytes ...cb635ef244cb6affc005c63d0bf8b52aecb1d986-4 | 1 - ...d67bf90feaeb1912792508afa01a09fe1f044c6-13 | Bin 33 -> 0 bytes ...cda434677d4bdd969a3bbf84086349f821e39c80-1 | Bin 48 -> 0 bytes ...eb22e7f581d85ed876e3d61da7df65da8954bf2-32 | Bin 88 -> 0 bytes ...fe7201e28d42484764264c231663e6372e95ef7-14 | Bin 32 -> 0 bytes ...ff88dd94ee94e1901d25a74e29ad863bb78b1e4-16 | Bin 32 -> 0 bytes ...cffc7573debb5af80aaddfa752538825275fd6a9-7 | 1 - ...0ae058f71e53a7afd648b859cd7485886be550d-22 | Bin 127 -> 0 bytes ...0e6298a63ffc2695cf7d016a124db7375f197cf-21 | Bin 88 -> 0 bytes ...24f23a23508dd6bc93ea6283ed49c8ba4b737ed-15 | Bin 49 -> 0 bytes ...d295ca4c78f7fd3ff10b0520b09a0a346310e0a9-1 | Bin 58 -> 0 bytes ...3ddffcd038a5646a53d48b684eac5b721c7062a-18 | Bin 59 -> 0 bytes ...4275f1f814a5b24f7b4788d15f3fef7b2be8aef-23 | Bin 72 -> 0 bytes ...d57eaf0fada8726afac2287cafb7720af7417b16-1 | 1 - ...d5c9dc3b5b4e71d902fe4cf5c44b237b104a32a9-4 | Bin 30 -> 0 bytes ...d7855c38db11bfeeb474a4782f1ea293192f786f-1 | Bin 42 -> 0 bytes ...7912c5e2a776c408e7640f10bd7d655a6a0f31b-27 | Bin 51 -> 0 bytes ...8873ec9a0344ea23f70d1ffd78c2fd0435b9885-27 | Bin 88 -> 0 bytes ...a3418e70658be491531ef6524f6ef7984ff9e96-27 | Bin 147 -> 0 bytes ...affc68f738bd5945de9c7babd4e01cc4438fae8-31 | Bin 88 -> 0 bytes ...dba53c14b92561071ccd7762550d53cf43027bdf-1 | Bin 32 -> 0 bytes ...c61bdd2fb983111d1392cd79ba9b39e0a3b869f-20 | Bin 71 -> 0 bytes ...dcb49d3d45d32601fa27208cec33813e03ff6179-1 | Bin 47 -> 0 bytes ...dce9966b94744440d75a845a48c806041f5a6612-3 | Bin 31 -> 0 bytes ...d799919262810add464dbb4ee39a38f1e4ed258-13 | Bin 25 -> 0 bytes .../dd92516fbea2d0f96abc78f325d731053a451e16 | 1 - 
...df986569f89016184b5b6e924d5ba827c9980ca-28 | Bin 156 -> 0 bytes ...e0acf1136a1e05cd27345ce135ea26abd32bbfe-18 | Bin 36 -> 0 bytes ...e33e3ef8a5780c7d3458188a423c00f470904d0-15 | Bin 32 -> 0 bytes ...de501127da94246b2d3aa947637b49fbc17d5e47-1 | 1 - ...e702cd20caeb08a843e0c09b0ce87a74e300415-20 | Bin 71 -> 0 bytes ...e8abda1b9bd5628ca99c8f97237fa885a857bb5-19 | Bin 35 -> 0 bytes ...def6a9e986daf0b268ef29ef7e821a9f6840ef2c-8 | Bin 20 -> 0 bytes ...f0768cf0c709a1ff1a93cc0dad23979501c54ff-21 | Bin 115 -> 0 bytes ...f5bd5044e9b74c648b5f5fcb4dbdf953175f9f9-27 | Bin 88 -> 0 bytes ...dfad565009b0667ef2ee10ea9c1286ee5c3ce6b2-1 | 1 - .../pierrec/lz4/fuzz/corpus/e.txt.bz2 | Bin 43149 -> 0 bytes ...1556049ba9794a15ee21aa283876bf63e531a4f-24 | Bin 147 -> 0 bytes ...e17af76e8c119233dbd2888ab519bd76d7aa7fe9-6 | Bin 19 -> 0 bytes ...22a5ac115e8bfd3468c9e6ad73ea11b8743798a-30 | Bin 75 -> 0 bytes ...346c715ac3187598d8c0453d9e741fae1232c99-11 | Bin 29 -> 0 bytes ...3acf6f2b5a1b97f5a82ebf7d1822077561583fe-26 | Bin 195 -> 0 bytes ...e4a2a1469de980756c607cdc2584fc94bc109382-1 | Bin 58 -> 0 bytes ...544de8de59a005934dd4b7fd465c5bb0046482e-26 | Bin 88 -> 0 bytes ...68b04a675d8d4192565a808955764c77ae510e6-16 | Bin 122 -> 0 bytes ...7ea1bfd65ca7db84f0984474658bfc3b063c63a-13 | Bin 32 -> 0 bytes ...7f55f4c85203100c3cd819cdc87abb0e9e86374-32 | Bin 88 -> 0 bytes ...ea212596f8a7aec4eb2e85fd2cdb5c2816b58495-5 | Bin 12 -> 0 bytes ...a83e3b78398628e8a85e2e618fa956c0ffbd733-35 | Bin 42 -> 0 bytes ...a9af92f89e6889b523461ae7b2b9fecee5a7280-18 | Bin 130 -> 0 bytes ...b967d9cb0407c2328bbdbf98b5602274452d900-23 | Bin 32 -> 0 bytes ...bc69b7ca13ae23b075c9b21ebc283278714e3aa-18 | Bin 32 -> 0 bytes ...c8e760e79dc08a79af0d79c510cafb74e504472-18 | Bin 28 -> 0 bytes ...c93fb54ce508e132c89b6637913f84c3c78bafd-29 | Bin 67 -> 0 bytes ...c984b6fb8e41dbcd4299ecd1dd6fd0a77347122-13 | Bin 32 -> 0 bytes ...cbd6bdea50b52d263b4e9cdb96c7ce078d2b780-25 | Bin 147 -> 0 bytes ...cdd1df7d975c8cf8d015b2f1d0d7c6e00eb578b-15 
| Bin 33 -> 0 bytes ...da1ee9cf85f3f71ec8a4eec7534ed2677b47775-15 | Bin 32 -> 0 bytes ...dbc11de7dd074c367a69532db023cd810bb3978-13 | Bin 43 -> 0 bytes ...ee6afbf375619a2bd6fb0abe0e42e51ab3b0ab13-6 | Bin 20 -> 0 bytes ...ee907d38c1394c4971b389a99a3be0913836212b-9 | Bin 72 -> 0 bytes ...ebbefa1983c9e1aeb5217aabcac7ab24dfe166f-17 | Bin 75 -> 0 bytes ...ee3d4a9a8b297f016c23f50a9792c30a621720e-21 | Bin 71 -> 0 bytes ...ef87432939473264357babc06257b0280ffd15ee-5 | 1 - ...fd3db86b12d209db7f0b24281a2cccebff526cd-33 | Bin 36 -> 0 bytes ...fdd522fe3abb88204f63b1fe7312f62b6ee593d-16 | Bin 67 -> 0 bytes ...31dcf6e3044e050f2396b601ebe420e89749c07-27 | Bin 117 -> 0 bytes ...35bdf2e8b4af93c6a73e564055aa4eacd9f0d0c-13 | Bin 25 -> 0 bytes ...f3a2381d6f39defe22520aea46201e6ce6d37f80-1 | Bin 31 -> 0 bytes ...f3e916907eab3412b5875e5eca05bf3eac8a8d5e-1 | Bin 37 -> 0 bytes ...3f49f3016c41052be090544cf110c322bc7ef63-24 | Bin 82 -> 0 bytes ...4003ca01b90a4ee1be5701a5dd7d5f04e00c8f8-28 | Bin 147 -> 0 bytes ...f493376c3eda80cbe822ac456486734b72f891fc-2 | Bin 44 -> 0 bytes ...55efbb04cd32f7828e951d067319db00627153f-28 | Bin 51 -> 0 bytes ...5ecb47dfd92bb0564588beefd03ffcb0bbdae54-29 | Bin 88 -> 0 bytes ...71b4776ecbbe47746fb53d7749751c5c5bbff05-22 | Bin 61 -> 0 bytes ...724d4c839c012c7772618e28ef68d478cc00c74-21 | Bin 37 -> 0 bytes ...f86152e5ce510dc674fa73d20b324e2d3c4d145b-1 | 1 - ...f931bee2e7f1fefd8bb2fabf88f8f3d2b3ea78fa-2 | Bin 36 -> 0 bytes ...9bcd3660c355799a865fedd15cb27a18591f244-33 | Bin 33 -> 0 bytes ...ac6c4165067ef2d87a23a2530a59eb560d470e0-23 | Bin 51 -> 0 bytes ...fb56a1001599e07354ce3101af111554c6c9bb40-1 | Bin 60 -> 0 bytes ...fb75f3059f8835a7e8781c899af756f22d1c06b4-7 | Bin 13 -> 0 bytes ...bfe35b0485040874ed564b94ba764bdd17e80fc-10 | Bin 20 -> 0 bytes ...cb1c8b1893ca85647581cadec481754d8f35c96-12 | Bin 25 -> 0 bytes ...cb33fb48e48acd9155fd7ed8e82e71c850ffd22-16 | Bin 32 -> 0 bytes ...fcd47a15e10a21e1eb13aeac223becc89aac4c69-2 | Bin 38 -> 0 bytes 
...d4f0dc77a022a8140ffe5b2e1a5ff577e844878-27 | Bin 51 -> 0 bytes ...db78af507e72288b059ff902ae5e76538d1e6ea-14 | Bin 32 -> 0 bytes ...fe002e4c7731ecb4c09c09a4e1fa29c0c61874bc-7 | Bin 15 -> 0 bytes ...e78d4faf4ce717d84938010f92ca5e844f9980b-13 | Bin 24 -> 0 bytes ...f3b7ea844eb197dc6bd59d9f8e4a4a5718a6771-18 | Bin 36 -> 0 bytes ...f47856b8fa7323572c8b4a6d8028dcb2663a37a-11 | Bin 84 -> 0 bytes ...fa97253e1ab365b84eebb9d257f9370b7796fbf-28 | Bin 51 -> 0 bytes .../pierrec/lz4/fuzz/corpus/pss-vect.txt.bz2 | Bin 28526 -> 0 bytes .../github.com/pierrec/lz4/fuzz/lz4-fuzz.zip | Bin 2149434 -> 0 bytes vendor/github.com/pierrec/lz4/fuzz/lz4.go | 45 - vendor/github.com/pierrec/lz4/go.mod | 3 - vendor/github.com/pierrec/lz4/go.sum | 2 - .../pierrec/lz4/internal/xxh32/xxh32zero.go | 89 +- .../lz4/internal/xxh32/xxh32zero_test.go | 161 - vendor/github.com/pierrec/lz4/lz4.go | 78 +- vendor/github.com/pierrec/lz4/lz4c/main.go | 122 - vendor/github.com/pierrec/lz4/reader.go | 52 +- .../github.com/pierrec/lz4/reader_legacy.go | 207 + vendor/github.com/pierrec/lz4/reader_test.go | 60 - .../lz4/testdata/Mark.Twain-Tom.Sawyer.txt | 8465 -- .../testdata/Mark.Twain-Tom.Sawyer.txt.lz4 | Bin 256403 -> 0 bytes .../testdata/Mark.Twain-Tom.Sawyer_long.txt | 93115 ---------------- .../Mark.Twain-Tom.Sawyer_long.txt.lz4 | Bin 2811779 -> 0 bytes .../pierrec/lz4/testdata/README.txt | 1 - vendor/github.com/pierrec/lz4/testdata/e.txt | 1 - .../github.com/pierrec/lz4/testdata/e.txt.lz4 | Bin 95284 -> 0 bytes .../pierrec/lz4/testdata/gettysburg.txt | 29 - .../pierrec/lz4/testdata/gettysburg.txt.lz4 | Bin 1234 -> 0 bytes .../pierrec/lz4/testdata/pg1661.txt | 13052 --- .../pierrec/lz4/testdata/pg1661.txt.lz4 | Bin 377367 -> 0 bytes vendor/github.com/pierrec/lz4/testdata/pi.txt | 1 - .../pierrec/lz4/testdata/pi.txt.lz4 | Bin 95355 -> 0 bytes .../pierrec/lz4/testdata/random.data | Bin 16384 -> 0 bytes .../pierrec/lz4/testdata/random.data.lz4 | Bin 16403 -> 0 bytes .../pierrec/lz4/testdata/repeat.txt | 1 
- .../pierrec/lz4/testdata/repeat.txt.lz4 | Bin 59 -> 0 bytes vendor/github.com/pierrec/lz4/writer.go | 232 +- vendor/github.com/pierrec/lz4/writer_test.go | 79 - .../prometheus/client_golang/.gitignore | 26 - .../prometheus/client_golang/.travis.yml | 9 - .../prometheus/client_golang/AUTHORS.md | 18 - .../prometheus/client_golang/CHANGELOG.md | 109 - .../prometheus/client_golang/CONTRIBUTING.md | 18 - .../prometheus/client_golang/README.md | 45 - .../prometheus/client_golang/VERSION | 1 - .../client_golang/api/prometheus/api.go | 345 - .../client_golang/api/prometheus/api_test.go | 453 - .../client_golang/examples/random/main.go | 103 - .../prometheus/benchmark_test.go | 183 - .../{metric_test.go => build_info.go} | 30 +- .../prometheus/build_info_pre_1.12.go | 22 + .../client_golang/prometheus/collector.go | 73 +- .../client_golang/prometheus/counter.go | 233 +- .../client_golang/prometheus/counter_test.go | 58 - .../client_golang/prometheus/desc.go | 75 +- .../client_golang/prometheus/doc.go | 116 +- .../prometheus/example_clustermanager_test.go | 118 - .../client_golang/prometheus/examples_test.go | 751 - .../prometheus/expvar_collector_test.go | 97 - .../client_golang/prometheus/fnv.go | 13 + .../client_golang/prometheus/gauge.go | 211 +- .../client_golang/prometheus/gauge_test.go | 182 - .../client_golang/prometheus/go_collector.go | 190 +- .../prometheus/go_collector_test.go | 123 - .../client_golang/prometheus/histogram.go | 353 +- .../prometheus/histogram_test.go | 326 - .../client_golang/prometheus/http.go | 490 - .../client_golang/prometheus/http_test.go | 121 - .../prometheus/internal/metric.go | 85 + .../client_golang/prometheus/labels.go | 87 + .../client_golang/prometheus/metric.go | 94 +- .../client_golang/prometheus/observer.go | 64 + .../prometheus/process_collector.go | 216 +- .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_test.go | 58 - .../prometheus/process_collector_windows.go | 116 + 
.../prometheus/promhttp/delegator.go | 370 + .../client_golang/prometheus/promhttp/http.go | 364 +- .../prometheus/promhttp/http_test.go | 137 - .../prometheus/promhttp/instrument_client.go | 219 + .../prometheus/promhttp/instrument_server.go | 458 + .../prometheus/push/examples_test.go | 56 - .../client_golang/prometheus/push/push.go | 172 - .../prometheus/push/push_test.go | 176 - .../client_golang/prometheus/registry.go | 762 +- .../client_golang/prometheus/registry_test.go | 545 - .../client_golang/prometheus/summary.go | 325 +- .../client_golang/prometheus/summary_test.go | 347 - .../client_golang/prometheus/timer.go | 54 + .../client_golang/prometheus/untyped.go | 102 +- .../client_golang/prometheus/value.go | 158 +- .../client_golang/prometheus/vec.go | 518 +- .../client_golang/prometheus/vec_test.go | 312 - .../client_golang/prometheus/wrap.go | 214 + .../prometheus/client_model/.gitignore | 1 - .../prometheus/client_model/CONTRIBUTING.md | 18 - .../prometheus/client_model/MAINTAINERS.md | 1 - .../prometheus/client_model/Makefile | 62 - .../prometheus/client_model/README.md | 26 - .../prometheus/client_model/cpp/metrics.pb.cc | 3380 - .../prometheus/client_model/cpp/metrics.pb.h | 2072 - .../prometheus/client_model/go/metrics.pb.go | 268 +- .../prometheus/client_model/metrics.proto | 82 - .../prometheus/client_model/pom.xml | 130 - .../python/prometheus/__init__.py | 12 - .../python/prometheus/client/__init__.py | 12 - .../prometheus/client/model/__init__.py | 14 - .../prometheus/client/model/metrics_pb2.py | 575 - .../prometheus/client_model/ruby/.gitignore | 5 - .../prometheus/client_model/ruby/Gemfile | 4 - .../prometheus/client_model/ruby/LICENSE | 201 - .../prometheus/client_model/ruby/Makefile | 17 - .../prometheus/client_model/ruby/README.md | 31 - .../prometheus/client_model/ruby/Rakefile | 1 - .../ruby/lib/prometheus/client/model.rb | 2 - .../lib/prometheus/client/model/metrics.pb.rb | 111 - .../lib/prometheus/client/model/version.rb | 7 - 
.../ruby/prometheus-client-model.gemspec | 22 - .../prometheus/client_model/setup.py | 23 - .../java/io/prometheus/client/Metrics.java | 7683 -- .../github.com/prometheus/common/.travis.yml | 6 - .../prometheus/common/CONTRIBUTING.md | 18 - .../prometheus/common/MAINTAINERS.md | 1 - vendor/github.com/prometheus/common/README.md | 12 - .../prometheus/common/config/config.go | 34 - .../prometheus/common/config/http_config.go | 317 - .../common/config/http_config_test.go | 618 - .../common/config/testdata/barney-no-pass.key | 27 - .../common/config/testdata/barney.crt | 96 - .../config/testdata/basic-auth-password | 1 - .../common/config/testdata/bearer.token | 1 - .../testdata/http.conf.basic-auth.good.yaml | 3 - .../http.conf.basic-auth.no-password.yaml | 2 - .../http.conf.basic-auth.no-username.yaml | 2 - .../http.conf.basic-auth.too-much.bad.yaml | 4 - ...ttp.conf.bearer-token-and-file-set.bad.yml | 5 - .../config/testdata/http.conf.empty.bad.yml | 4 - .../common/config/testdata/http.conf.good.yml | 4 - ...ttp.conf.invalid-bearer-token-file.bad.yml | 1 - .../common/config/testdata/server.crt | 96 - .../common/config/testdata/server.key | 28 - .../common/config/testdata/tls-ca-chain.pem | 172 - .../testdata/tls_config.cert_no_key.bad.yml | 1 - .../config/testdata/tls_config.empty.good.yml | 0 .../testdata/tls_config.insecure.good.yml | 1 - .../testdata/tls_config.invalid_field.bad.yml | 1 - .../testdata/tls_config.key_no_cert.bad.yml | 1 - .../common/config/tls_config_test.go | 62 - .../prometheus/common/expfmt/bench_test.go | 167 - .../prometheus/common/expfmt/decode.go | 2 +- .../prometheus/common/expfmt/decode_test.go | 435 - .../prometheus/common/expfmt/encode.go | 124 +- .../prometheus/common/expfmt/expfmt.go | 11 +- .../expfmt/fuzz/corpus/from_test_parse_0 | 2 - .../expfmt/fuzz/corpus/from_test_parse_1 | 6 - .../expfmt/fuzz/corpus/from_test_parse_2 | 12 - .../expfmt/fuzz/corpus/from_test_parse_3 | 22 - .../expfmt/fuzz/corpus/from_test_parse_4 | 10 - 
.../fuzz/corpus/from_test_parse_error_0 | 1 - .../fuzz/corpus/from_test_parse_error_1 | 1 - .../fuzz/corpus/from_test_parse_error_10 | 1 - .../fuzz/corpus/from_test_parse_error_11 | 1 - .../fuzz/corpus/from_test_parse_error_12 | 3 - .../fuzz/corpus/from_test_parse_error_13 | 3 - .../fuzz/corpus/from_test_parse_error_14 | 3 - .../fuzz/corpus/from_test_parse_error_15 | 2 - .../fuzz/corpus/from_test_parse_error_16 | 2 - .../fuzz/corpus/from_test_parse_error_17 | 1 - .../fuzz/corpus/from_test_parse_error_18 | 1 - .../fuzz/corpus/from_test_parse_error_19 | 3 - .../fuzz/corpus/from_test_parse_error_2 | 3 - .../fuzz/corpus/from_test_parse_error_3 | 1 - .../fuzz/corpus/from_test_parse_error_4 | 1 - .../fuzz/corpus/from_test_parse_error_5 | 1 - .../fuzz/corpus/from_test_parse_error_6 | 1 - .../fuzz/corpus/from_test_parse_error_7 | 3 - .../fuzz/corpus/from_test_parse_error_8 | 1 - .../fuzz/corpus/from_test_parse_error_9 | 1 - .../common/expfmt/fuzz/corpus/minimal | 1 - .../common/expfmt/openmetrics_create.go | 527 + .../prometheus/common/expfmt/testdata/json2 | 46 - .../common/expfmt/testdata/json2_bad | 46 - .../common/expfmt/testdata/protobuf | Bin 8239 -> 0 bytes .../common/expfmt/testdata/protobuf.gz | Bin 2097 -> 0 bytes .../prometheus/common/expfmt/testdata/text | 322 - .../prometheus/common/expfmt/testdata/text.gz | Bin 2598 -> 0 bytes .../prometheus/common/expfmt/text_create.go | 21 +- .../common/expfmt/text_create_test.go | 561 - .../prometheus/common/expfmt/text_parse.go | 24 +- .../common/expfmt/text_parse_test.go | 593 - .../bitbucket.org/ww/goautoneg/autoneg.go | 6 +- .../ww/goautoneg/autoneg_test.go | 33 - .../github.com/prometheus/common/log/log.go | 4 + .../prometheus/common/log/log_test.go | 39 - .../common/log/syslog_formatter_test.go | 52 - .../prometheus/common/model/alert_test.go | 118 - .../github.com/prometheus/common/model/fnv.go | 2 +- .../prometheus/common/model/labels_test.go | 140 - .../prometheus/common/model/metric.go | 1 - 
.../prometheus/common/model/metric_test.go | 132 - .../prometheus/common/model/signature_test.go | 314 - .../prometheus/common/model/silence_test.go | 228 - .../prometheus/common/model/time.go | 138 +- .../prometheus/common/model/time_test.go | 132 - .../prometheus/common/model/value_test.go | 468 - .../prometheus/common/promlog/flag/flag.go | 43 - .../prometheus/common/promlog/log.go | 95 - .../prometheus/common/route/route.go | 110 - .../prometheus/common/route/route_test.go | 76 - .../prometheus/common/version/info.go | 19 +- .../prometheus/procfs/.circleci/config.yml | 49 - .../prometheus/procfs/.golangci.yml | 4 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../prometheus/procfs/CONTRIBUTING.md | 109 +- .../prometheus/procfs/MAINTAINERS.md | 3 +- vendor/github.com/prometheus/procfs/Makefile | 66 +- .../prometheus/procfs/Makefile.common | 302 + vendor/github.com/prometheus/procfs/README.md | 54 +- .../github.com/prometheus/procfs/SECURITY.md | 6 + vendor/github.com/prometheus/procfs/arp.go | 85 + .../prometheus/procfs/bcache/bcache.go | 84 - .../prometheus/procfs/bcache/get.go | 330 - .../prometheus/procfs/bcache/get_test.go | 114 - .../github.com/prometheus/procfs/buddyinfo.go | 18 +- .../prometheus/procfs/buddyinfo_test.go | 64 - .../github.com/prometheus/procfs/cpuinfo.go | 481 + .../procfs/{sysfs/doc.go => cpuinfo_armx.go} | 11 +- .../prometheus/procfs/cpuinfo_mipsx.go | 19 + .../prometheus/procfs/cpuinfo_others.go | 19 + .../prometheus/procfs/cpuinfo_ppcx.go | 19 + .../prometheus/procfs/cpuinfo_riscvx.go | 19 + .../prometheus/procfs/cpuinfo_s390x.go | 18 + .../prometheus/procfs/cpuinfo_x86.go | 19 + vendor/github.com/prometheus/procfs/crypto.go | 153 + .../prometheus/procfs/fixtures.ttar | 6331 +- vendor/github.com/prometheus/procfs/fs.go | 71 +- .../github.com/prometheus/procfs/fs_test.go | 39 - .../github.com/prometheus/procfs/fscache.go | 422 + vendor/github.com/prometheus/procfs/go.mod | 9 + vendor/github.com/prometheus/procfs/go.sum | 8 + 
.../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 38 + .../procfs/internal/util/readfile.go | 38 + .../{sysreadfile_linux.go => sysreadfile.go} | 7 +- .../internal/util/sysreadfile_compat.go} | 22 +- .../procfs/internal/util/valueparser.go | 91 + vendor/github.com/prometheus/procfs/ipvs.go | 42 +- .../github.com/prometheus/procfs/ipvs_test.go | 250 - .../prometheus/procfs/kernel_random.go | 62 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 168 +- .../prometheus/procfs/mdstat_test.go | 44 - .../github.com/prometheus/procfs/meminfo.go | 277 + .../github.com/prometheus/procfs/mountinfo.go | 180 + .../prometheus/procfs/mountstats.go | 78 +- .../prometheus/procfs/mountstats_test.go | 380 - .../prometheus/procfs/net_conntrackstat.go | 153 + .../github.com/prometheus/procfs/net_dev.go | 39 +- .../prometheus/procfs/net_dev_test.go | 86 - .../prometheus/procfs/net_ip_socket.go | 220 + .../prometheus/procfs/net_protocols.go | 180 + .../prometheus/procfs/net_sockstat.go | 163 + .../prometheus/procfs/net_softnet.go | 102 + .../github.com/prometheus/procfs/net_tcp.go | 64 + .../github.com/prometheus/procfs/net_udp.go | 64 + .../github.com/prometheus/procfs/net_unix.go | 257 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 - .../github.com/prometheus/procfs/nfs/parse.go | 317 - .../prometheus/procfs/nfs/parse_nfs.go | 67 - .../prometheus/procfs/nfs/parse_nfs_test.go | 305 - .../prometheus/procfs/nfs/parse_nfsd.go | 89 - .../prometheus/procfs/nfs/parse_nfsd_test.go | 196 - vendor/github.com/prometheus/procfs/proc.go | 105 +- .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 133 + .../github.com/prometheus/procfs/proc_io.go | 16 +- .../prometheus/procfs/proc_io_test.go | 46 - .../prometheus/procfs/proc_limits.go | 94 +- .../prometheus/procfs/proc_limits_test.go | 44 - 
.../github.com/prometheus/procfs/proc_maps.go | 209 + .../github.com/prometheus/procfs/proc_ns.go | 10 +- .../prometheus/procfs/proc_ns_test.go | 44 - .../github.com/prometheus/procfs/proc_psi.go | 100 + .../prometheus/procfs/proc_smaps.go | 165 + .../github.com/prometheus/procfs/proc_stat.go | 33 +- .../prometheus/procfs/proc_stat_test.go | 123 - .../prometheus/procfs/proc_status.go | 170 + .../github.com/prometheus/procfs/proc_test.go | 230 - .../github.com/prometheus/procfs/schedstat.go | 121 + .../procfs/scripts/check_license.sh | 29 - vendor/github.com/prometheus/procfs/slab.go | 151 + vendor/github.com/prometheus/procfs/stat.go | 68 +- .../github.com/prometheus/procfs/stat_test.go | 74 - vendor/github.com/prometheus/procfs/swaps.go | 89 + .../prometheus/procfs/sysfs/.gitignore | 1 - .../prometheus/procfs/sysfs/fixtures.ttar | 999 - .../github.com/prometheus/procfs/sysfs/fs.go | 108 - .../prometheus/procfs/sysfs/fs_test.go | 108 - .../prometheus/procfs/sysfs/net_class.go | 153 - .../prometheus/procfs/sysfs/net_class_test.go | 90 - .../prometheus/procfs/sysfs/system_cpu.go | 141 - .../procfs/sysfs/system_cpu_test.go | 66 - vendor/github.com/prometheus/procfs/ttar | 42 +- vendor/github.com/prometheus/procfs/vm.go | 210 + vendor/github.com/prometheus/procfs/xfrm.go | 5 +- .../github.com/prometheus/procfs/xfrm_test.go | 66 - .../github.com/prometheus/procfs/xfs/parse.go | 330 - .../prometheus/procfs/xfs/parse_test.go | 442 - .../github.com/prometheus/procfs/xfs/xfs.go | 163 - .../github.com/prometheus/procfs/zoneinfo.go | 196 + .../rcrowley/go-metrics/.travis.yml | 20 +- .../github.com/rcrowley/go-metrics/README.md | 3 + .../cmd/metrics-bench/metrics-bench.go | 20 - .../cmd/metrics-example/metrics-example.go | 154 - .../go-metrics/cmd/never-read/never-read.go | 22 - .../rcrowley/go-metrics/counter_test.go | 77 - .../github.com/rcrowley/go-metrics/debug.go | 30 +- .../rcrowley/go-metrics/debug_test.go | 48 - .../rcrowley/go-metrics/ewma_test.go | 258 - 
.../github.com/rcrowley/go-metrics/exp/exp.go | 156 - .../rcrowley/go-metrics/gauge_float64_test.go | 69 - .../rcrowley/go-metrics/gauge_test.go | 87 - .../rcrowley/go-metrics/graphite_test.go | 22 - .../rcrowley/go-metrics/histogram_test.go | 95 - .../rcrowley/go-metrics/json_test.go | 28 - .../rcrowley/go-metrics/librato/client.go | 102 - .../rcrowley/go-metrics/librato/librato.go | 235 - vendor/github.com/rcrowley/go-metrics/log.go | 24 +- .../rcrowley/go-metrics/meter_test.go | 112 - .../rcrowley/go-metrics/metrics_test.go | 124 - .../rcrowley/go-metrics/opentsdb_test.go | 21 - .../rcrowley/go-metrics/registry.go | 24 +- .../rcrowley/go-metrics/registry_test.go | 363 - .../github.com/rcrowley/go-metrics/runtime.go | 130 +- .../rcrowley/go-metrics/runtime_test.go | 88 - .../rcrowley/go-metrics/sample_test.go | 363 - .../rcrowley/go-metrics/stathat/stathat.go | 69 - .../rcrowley/go-metrics/timer_test.go | 101 - .../rcrowley/go-metrics/validate.sh | 0 .../rcrowley/go-metrics/writer_test.go | 22 - .../github.com/samuel/go-zookeeper/.gitignore | 1 - .../samuel/go-zookeeper/.travis.yml | 33 - .../github.com/samuel/go-zookeeper/README.md | 11 - .../samuel/go-zookeeper/examples/basic.go | 22 - .../samuel/go-zookeeper/zk/cluster_test.go | 314 - .../github.com/samuel/go-zookeeper/zk/conn.go | 98 +- .../samuel/go-zookeeper/zk/conn_test.go | 57 - .../samuel/go-zookeeper/zk/constants.go | 25 +- .../samuel/go-zookeeper/zk/constants_test.go | 24 - .../go-zookeeper/zk/dnshostprovider_test.go | 224 - .../github.com/samuel/go-zookeeper/zk/flw.go | 8 +- .../samuel/go-zookeeper/zk/flw_test.go | 330 - .../samuel/go-zookeeper/zk/lock_test.go | 94 - .../samuel/go-zookeeper/zk/server_help.go | 216 - .../samuel/go-zookeeper/zk/server_java.go | 136 - .../samuel/go-zookeeper/zk/structs.go | 19 +- .../samuel/go-zookeeper/zk/structs_test.go | 83 - .../samuel/go-zookeeper/zk/throttle_test.go | 136 - .../samuel/go-zookeeper/zk/util_test.go | 53 - .../samuel/go-zookeeper/zk/zk_test.go | 939 - 
.../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 64 +- .../github.com/sirupsen/logrus/CHANGELOG.md | 58 + vendor/github.com/sirupsen/logrus/README.md | 46 +- vendor/github.com/sirupsen/logrus/alt_exit.go | 18 +- .../sirupsen/logrus/alt_exit_test.go | 99 - .../github.com/sirupsen/logrus/appveyor.yml | 28 +- vendor/github.com/sirupsen/logrus/entry.go | 188 +- .../github.com/sirupsen/logrus/entry_test.go | 115 - .../sirupsen/logrus/example_basic_test.go | 77 - .../logrus/example_global_hook_test.go | 36 - .../sirupsen/logrus/example_hook_test.go | 43 - vendor/github.com/sirupsen/logrus/exported.go | 8 +- .../sirupsen/logrus/formatter_bench_test.go | 101 - vendor/github.com/sirupsen/logrus/go.mod | 8 +- vendor/github.com/sirupsen/logrus/go.sum | 13 +- .../github.com/sirupsen/logrus/hook_test.go | 192 - .../sirupsen/logrus/hooks/syslog/README.md | 39 - .../sirupsen/logrus/hooks/syslog/syslog.go | 55 - .../logrus/hooks/syslog/syslog_test.go | 29 - .../logrus/internal/testutils/testutils.go | 58 - .../sirupsen/logrus/json_formatter.go | 26 +- .../sirupsen/logrus/json_formatter_test.go | 346 - vendor/github.com/sirupsen/logrus/logger.go | 173 +- .../sirupsen/logrus/logger_bench_test.go | 85 - .../github.com/sirupsen/logrus/logger_test.go | 42 - vendor/github.com/sirupsen/logrus/logrus.go | 44 +- .../github.com/sirupsen/logrus/logrus_test.go | 716 - .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 6 +- .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 6 +- .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 18 +- .../sirupsen/logrus/terminal_notwindows.go | 8 - .../sirupsen/logrus/terminal_windows.go | 18 - .../sirupsen/logrus/text_formatter.go | 141 +- .../sirupsen/logrus/text_formatter_test.go | 480 - 
vendor/github.com/sirupsen/logrus/writer.go | 6 + .../scram/.gitignore} | 0 vendor/github.com/xdg/scram/.travis.yml | 11 + vendor/github.com/xdg/scram/LICENSE | 175 + vendor/github.com/xdg/scram/README.md | 71 + vendor/github.com/xdg/scram/client.go | 130 + vendor/github.com/xdg/scram/client_conv.go | 149 + vendor/github.com/xdg/scram/common.go | 97 + vendor/github.com/xdg/scram/doc.go | 24 + vendor/github.com/xdg/scram/parse.go | 205 + vendor/github.com/xdg/scram/scram.go | 66 + vendor/github.com/xdg/scram/server.go | 50 + vendor/github.com/xdg/scram/server_conv.go | 151 + .../stringprep/.gitignore} | 0 vendor/github.com/xdg/stringprep/.travis.yml | 10 + vendor/github.com/xdg/stringprep/LICENSE | 175 + vendor/github.com/xdg/stringprep/README.md | 27 + vendor/github.com/xdg/stringprep/bidi.go | 73 + vendor/github.com/xdg/stringprep/doc.go | 10 + vendor/github.com/xdg/stringprep/error.go | 14 + vendor/github.com/xdg/stringprep/map.go | 21 + vendor/github.com/xdg/stringprep/profile.go | 75 + vendor/github.com/xdg/stringprep/saslprep.go | 52 + vendor/github.com/xdg/stringprep/set.go | 36 + vendor/github.com/xdg/stringprep/tables.go | 3215 + vendor/golang.org/x/crypto/.gitattributes | 10 - vendor/golang.org/x/crypto/.gitignore | 2 - vendor/golang.org/x/crypto/CONTRIBUTING.md | 26 - vendor/golang.org/x/crypto/README.md | 21 - vendor/golang.org/x/crypto/acme/acme.go | 926 - vendor/golang.org/x/crypto/acme/acme_test.go | 1313 - .../x/crypto/acme/autocert/autocert.go | 1139 - .../x/crypto/acme/autocert/autocert_test.go | 1189 - .../x/crypto/acme/autocert/cache.go | 130 - .../x/crypto/acme/autocert/cache_test.go | 58 - .../x/crypto/acme/autocert/example_test.go | 34 - .../acme/autocert/internal/acmetest/ca.go | 416 - .../x/crypto/acme/autocert/listener.go | 157 - .../x/crypto/acme/autocert/renewal.go | 141 - .../x/crypto/acme/autocert/renewal_test.go | 329 - vendor/golang.org/x/crypto/acme/http.go | 281 - vendor/golang.org/x/crypto/acme/http_test.go | 209 - 
vendor/golang.org/x/crypto/acme/jws.go | 156 - vendor/golang.org/x/crypto/acme/jws_test.go | 394 - vendor/golang.org/x/crypto/acme/types.go | 329 - vendor/golang.org/x/crypto/acme/types_test.go | 63 - vendor/golang.org/x/crypto/argon2/argon2.go | 285 - .../golang.org/x/crypto/argon2/argon2_test.go | 233 - vendor/golang.org/x/crypto/argon2/blake2b.go | 53 - .../x/crypto/argon2/blamka_amd64.go | 60 - .../golang.org/x/crypto/argon2/blamka_amd64.s | 243 - .../x/crypto/argon2/blamka_generic.go | 163 - .../golang.org/x/crypto/argon2/blamka_ref.go | 15 - vendor/golang.org/x/crypto/bcrypt/base64.go | 35 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 - .../golang.org/x/crypto/bcrypt/bcrypt_test.go | 243 - vendor/golang.org/x/crypto/blake2b/blake2b.go | 289 - .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 750 - .../x/crypto/blake2b/blake2b_amd64.go | 24 - .../x/crypto/blake2b/blake2b_amd64.s | 281 - .../x/crypto/blake2b/blake2b_generic.go | 179 - .../x/crypto/blake2b/blake2b_ref.go | 11 - .../x/crypto/blake2b/blake2b_test.go | 847 - vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 - .../golang.org/x/crypto/blake2b/register.go | 32 - vendor/golang.org/x/crypto/blake2s/blake2s.go | 244 - .../x/crypto/blake2s/blake2s_386.go | 32 - .../golang.org/x/crypto/blake2s/blake2s_386.s | 435 - .../x/crypto/blake2s/blake2s_amd64.go | 37 - .../x/crypto/blake2s/blake2s_amd64.s | 438 - .../x/crypto/blake2s/blake2s_generic.go | 174 - .../x/crypto/blake2s/blake2s_ref.go | 17 - .../x/crypto/blake2s/blake2s_test.go | 1050 - vendor/golang.org/x/crypto/blake2s/blake2x.go | 178 - .../golang.org/x/crypto/blake2s/register.go | 21 - vendor/golang.org/x/crypto/blowfish/block.go | 159 - .../x/crypto/blowfish/blowfish_test.go | 274 - vendor/golang.org/x/crypto/blowfish/cipher.go | 91 - vendor/golang.org/x/crypto/blowfish/const.go | 199 - vendor/golang.org/x/crypto/bn256/bn256.go | 416 - .../golang.org/x/crypto/bn256/bn256_test.go | 304 - 
vendor/golang.org/x/crypto/bn256/constants.go | 44 - vendor/golang.org/x/crypto/bn256/curve.go | 287 - .../golang.org/x/crypto/bn256/example_test.go | 43 - vendor/golang.org/x/crypto/bn256/gfp12.go | 200 - vendor/golang.org/x/crypto/bn256/gfp2.go | 219 - vendor/golang.org/x/crypto/bn256/gfp6.go | 296 - vendor/golang.org/x/crypto/bn256/optate.go | 395 - vendor/golang.org/x/crypto/bn256/twist.go | 258 - vendor/golang.org/x/crypto/cast5/cast5.go | 526 - .../golang.org/x/crypto/cast5/cast5_test.go | 106 - .../chacha20poly1305/chacha20poly1305.go | 101 - .../chacha20poly1305_amd64.go | 86 - .../chacha20poly1305/chacha20poly1305_amd64.s | 2695 - .../chacha20poly1305_generic.go | 81 - .../chacha20poly1305_noasm.go | 15 - .../chacha20poly1305/chacha20poly1305_test.go | 255 - .../chacha20poly1305_vectors_test.go | 726 - .../chacha20poly1305/xchacha20poly1305.go | 104 - vendor/golang.org/x/crypto/codereview.cfg | 1 - vendor/golang.org/x/crypto/cryptobyte/asn1.go | 751 - .../x/crypto/cryptobyte/asn1/asn1.go | 46 - .../x/crypto/cryptobyte/asn1_test.go | 333 - .../golang.org/x/crypto/cryptobyte/builder.go | 337 - .../x/crypto/cryptobyte/cryptobyte_test.go | 516 - .../x/crypto/cryptobyte/example_test.go | 154 - .../golang.org/x/crypto/cryptobyte/string.go | 166 - .../x/crypto/curve25519/const_amd64.h | 8 - .../x/crypto/curve25519/const_amd64.s | 20 - .../x/crypto/curve25519/cswap_amd64.s | 65 - .../x/crypto/curve25519/curve25519.go | 834 - .../x/crypto/curve25519/curve25519_test.go | 39 - vendor/golang.org/x/crypto/curve25519/doc.go | 23 - .../x/crypto/curve25519/freeze_amd64.s | 73 - .../x/crypto/curve25519/ladderstep_amd64.s | 1377 - .../x/crypto/curve25519/mont25519_amd64.go | 240 - .../x/crypto/curve25519/mul_amd64.s | 169 - .../x/crypto/curve25519/square_amd64.s | 132 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 217 - .../x/crypto/ed25519/ed25519_test.go | 220 - .../ed25519/internal/edwards25519/const.go | 1422 - .../internal/edwards25519/edwards25519.go | 1793 - 
.../x/crypto/ed25519/testdata/sign.input.gz | Bin 50330 -> 0 bytes .../golang.org/x/crypto/hkdf/example_test.go | 56 - vendor/golang.org/x/crypto/hkdf/hkdf.go | 93 - vendor/golang.org/x/crypto/hkdf/hkdf_test.go | 449 - .../internal/chacha20/chacha_generic.go | 264 - .../crypto/internal/chacha20/chacha_noasm.go | 16 - .../crypto/internal/chacha20/chacha_s390x.go | 30 - .../x/crypto/internal/chacha20/chacha_s390x.s | 283 - .../x/crypto/internal/chacha20/chacha_test.go | 225 - .../crypto/internal/chacha20/vectors_test.go | 578 - .../x/crypto/internal/chacha20/xor.go | 43 - .../x/crypto/internal/subtle/aliasing.go | 32 - .../internal/subtle/aliasing_appengine.go | 35 - .../x/crypto/internal/subtle/aliasing_test.go | 50 - .../golang.org/x/crypto/md4/example_test.go | 20 - vendor/golang.org/x/crypto/md4/md4.go | 4 + vendor/golang.org/x/crypto/md4/md4_test.go | 71 - vendor/golang.org/x/crypto/nacl/auth/auth.go | 58 - .../x/crypto/nacl/auth/auth_test.go | 172 - .../x/crypto/nacl/auth/example_test.go | 36 - vendor/golang.org/x/crypto/nacl/box/box.go | 103 - .../golang.org/x/crypto/nacl/box/box_test.go | 78 - .../x/crypto/nacl/box/example_test.go | 95 - .../x/crypto/nacl/secretbox/example_test.go | 53 - .../x/crypto/nacl/secretbox/secretbox.go | 173 - .../x/crypto/nacl/secretbox/secretbox_test.go | 154 - vendor/golang.org/x/crypto/nacl/sign/sign.go | 90 - .../x/crypto/nacl/sign/sign_test.go | 74 - vendor/golang.org/x/crypto/ocsp/ocsp.go | 781 - vendor/golang.org/x/crypto/ocsp/ocsp_test.go | 875 - .../x/crypto/openpgp/armor/armor.go | 219 - .../x/crypto/openpgp/armor/armor_test.go | 95 - .../x/crypto/openpgp/armor/encode.go | 160 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/canonical_text_test.go | 52 - .../x/crypto/openpgp/clearsign/clearsign.go | 399 - .../openpgp/clearsign/clearsign_test.go | 278 - .../x/crypto/openpgp/elgamal/elgamal.go | 122 - .../x/crypto/openpgp/elgamal/elgamal_test.go | 49 - .../x/crypto/openpgp/errors/errors.go | 72 - 
vendor/golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/keys_data_test.go | 200 - .../golang.org/x/crypto/openpgp/keys_test.go | 495 - .../x/crypto/openpgp/packet/compressed.go | 123 - .../crypto/openpgp/packet/compressed_test.go | 41 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 206 - .../openpgp/packet/encrypted_key_test.go | 151 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../x/crypto/openpgp/packet/ocfb_test.go | 46 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 162 - .../x/crypto/openpgp/packet/opaque_test.go | 67 - .../x/crypto/openpgp/packet/packet.go | 551 - .../x/crypto/openpgp/packet/packet_test.go | 255 - .../x/crypto/openpgp/packet/private_key.go | 385 - .../crypto/openpgp/packet/private_key_test.go | 249 - .../x/crypto/openpgp/packet/public_key.go | 753 - .../crypto/openpgp/packet/public_key_test.go | 228 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../openpgp/packet/public_key_v3_test.go | 82 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_test.go | 78 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/signature_v3_test.go | 92 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../packet/symmetric_key_encrypted_test.go | 117 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../packet/symmetrically_encrypted_test.go | 123 - .../x/crypto/openpgp/packet/userattribute.go | 91 - .../openpgp/packet/userattribute_test.go | 109 - .../x/crypto/openpgp/packet/userid.go | 160 - .../x/crypto/openpgp/packet/userid_test.go | 87 - vendor/golang.org/x/crypto/openpgp/read.go | 442 - .../golang.org/x/crypto/openpgp/read_test.go | 613 - vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 - .../x/crypto/openpgp/s2k/s2k_test.go | 137 - vendor/golang.org/x/crypto/openpgp/write.go | 416 - 
.../golang.org/x/crypto/openpgp/write_test.go | 362 - .../x/crypto/otr/libotr_test_helper.c | 197 - vendor/golang.org/x/crypto/otr/otr.go | 1415 - vendor/golang.org/x/crypto/otr/otr_test.go | 470 - vendor/golang.org/x/crypto/otr/smp.go | 572 - .../golang.org/x/crypto/pbkdf2/pbkdf2_test.go | 176 - .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 - .../x/crypto/pkcs12/bmp-string_test.go | 63 - vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 - .../golang.org/x/crypto/pkcs12/crypto_test.go | 125 - vendor/golang.org/x/crypto/pkcs12/errors.go | 23 - .../crypto/pkcs12/internal/rc2/bench_test.go | 27 - .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 - .../x/crypto/pkcs12/internal/rc2/rc2_test.go | 92 - vendor/golang.org/x/crypto/pkcs12/mac.go | 45 - vendor/golang.org/x/crypto/pkcs12/mac_test.go | 42 - vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 - .../golang.org/x/crypto/pkcs12/pbkdf_test.go | 34 - vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 346 - .../golang.org/x/crypto/pkcs12/pkcs12_test.go | 138 - vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 - .../golang.org/x/crypto/poly1305/poly1305.go | 33 - .../x/crypto/poly1305/poly1305_test.go | 132 - .../golang.org/x/crypto/poly1305/sum_amd64.go | 22 - .../golang.org/x/crypto/poly1305/sum_amd64.s | 125 - .../golang.org/x/crypto/poly1305/sum_arm.go | 22 - vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 - .../golang.org/x/crypto/poly1305/sum_noasm.go | 14 - .../golang.org/x/crypto/poly1305/sum_ref.go | 139 - .../golang.org/x/crypto/poly1305/sum_s390x.go | 49 - .../golang.org/x/crypto/poly1305/sum_s390x.s | 400 - .../x/crypto/poly1305/sum_vmsl_s390x.s | 931 - .../x/crypto/poly1305/vectors_test.go | 2943 - .../x/crypto/ripemd160/ripemd160.go | 120 - .../x/crypto/ripemd160/ripemd160_test.go | 72 - .../x/crypto/ripemd160/ripemd160block.go | 165 - .../x/crypto/salsa20/salsa/hsalsa20.go | 144 - .../x/crypto/salsa20/salsa/salsa2020_amd64.s | 889 - .../x/crypto/salsa20/salsa/salsa208.go | 199 - 
.../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 - .../x/crypto/salsa20/salsa/salsa20_ref.go | 234 - .../x/crypto/salsa20/salsa/salsa_test.go | 54 - vendor/golang.org/x/crypto/salsa20/salsa20.go | 58 - .../x/crypto/salsa20/salsa20_test.go | 139 - .../x/crypto/scrypt/example_test.go | 26 - vendor/golang.org/x/crypto/scrypt/scrypt.go | 244 - .../golang.org/x/crypto/scrypt/scrypt_test.go | 162 - vendor/golang.org/x/crypto/sha3/doc.go | 66 - vendor/golang.org/x/crypto/sha3/hashes.go | 91 - .../x/crypto/sha3/hashes_generic.go | 27 - vendor/golang.org/x/crypto/sha3/keccakf.go | 412 - .../golang.org/x/crypto/sha3/keccakf_amd64.go | 13 - .../golang.org/x/crypto/sha3/keccakf_amd64.s | 390 - vendor/golang.org/x/crypto/sha3/register.go | 18 - vendor/golang.org/x/crypto/sha3/sha3.go | 192 - vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 289 - vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 49 - vendor/golang.org/x/crypto/sha3/sha3_test.go | 337 - vendor/golang.org/x/crypto/sha3/shake.go | 70 - .../golang.org/x/crypto/sha3/shake_generic.go | 19 - .../sha3/testdata/keccakKats.json.deflate | Bin 521342 -> 0 bytes vendor/golang.org/x/crypto/sha3/xor.go | 16 - .../golang.org/x/crypto/sha3/xor_generic.go | 28 - .../golang.org/x/crypto/sha3/xor_unaligned.go | 58 - .../golang.org/x/crypto/ssh/agent/client.go | 789 - .../x/crypto/ssh/agent/client_test.go | 466 - .../x/crypto/ssh/agent/example_test.go | 41 - .../golang.org/x/crypto/ssh/agent/forward.go | 103 - .../golang.org/x/crypto/ssh/agent/keyring.go | 241 - .../x/crypto/ssh/agent/keyring_test.go | 76 - .../golang.org/x/crypto/ssh/agent/server.go | 567 - .../x/crypto/ssh/agent/server_test.go | 259 - .../x/crypto/ssh/agent/testdata_test.go | 64 - .../golang.org/x/crypto/ssh/benchmark_test.go | 123 - vendor/golang.org/x/crypto/ssh/buffer.go | 97 - vendor/golang.org/x/crypto/ssh/buffer_test.go | 87 - vendor/golang.org/x/crypto/ssh/certs.go | 535 - vendor/golang.org/x/crypto/ssh/certs_test.go | 335 - 
vendor/golang.org/x/crypto/ssh/channel.go | 633 - vendor/golang.org/x/crypto/ssh/cipher.go | 770 - vendor/golang.org/x/crypto/ssh/cipher_test.go | 131 - vendor/golang.org/x/crypto/ssh/client.go | 278 - vendor/golang.org/x/crypto/ssh/client_auth.go | 525 - .../x/crypto/ssh/client_auth_test.go | 678 - vendor/golang.org/x/crypto/ssh/client_test.go | 166 - vendor/golang.org/x/crypto/ssh/common.go | 383 - vendor/golang.org/x/crypto/ssh/connection.go | 143 - vendor/golang.org/x/crypto/ssh/doc.go | 21 - .../golang.org/x/crypto/ssh/example_test.go | 320 - vendor/golang.org/x/crypto/ssh/handshake.go | 646 - .../golang.org/x/crypto/ssh/handshake_test.go | 559 - vendor/golang.org/x/crypto/ssh/kex.go | 540 - vendor/golang.org/x/crypto/ssh/kex_test.go | 50 - vendor/golang.org/x/crypto/ssh/keys.go | 1100 - vendor/golang.org/x/crypto/ssh/keys_test.go | 574 - .../x/crypto/ssh/knownhosts/knownhosts.go | 540 - .../crypto/ssh/knownhosts/knownhosts_test.go | 356 - vendor/golang.org/x/crypto/ssh/mac.go | 61 - .../golang.org/x/crypto/ssh/mempipe_test.go | 110 - vendor/golang.org/x/crypto/ssh/messages.go | 766 - .../golang.org/x/crypto/ssh/messages_test.go | 288 - vendor/golang.org/x/crypto/ssh/mux.go | 330 - vendor/golang.org/x/crypto/ssh/mux_test.go | 501 - vendor/golang.org/x/crypto/ssh/server.go | 594 - vendor/golang.org/x/crypto/ssh/session.go | 647 - .../golang.org/x/crypto/ssh/session_test.go | 774 - vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 - vendor/golang.org/x/crypto/ssh/tcpip.go | 474 - vendor/golang.org/x/crypto/ssh/tcpip_test.go | 20 - .../x/crypto/ssh/terminal/terminal.go | 951 - .../x/crypto/ssh/terminal/terminal_test.go | 358 - .../golang.org/x/crypto/ssh/terminal/util.go | 114 - .../x/crypto/ssh/terminal/util_bsd.go | 12 - .../x/crypto/ssh/terminal/util_linux.go | 10 - .../x/crypto/ssh/terminal/util_plan9.go | 58 - .../x/crypto/ssh/terminal/util_solaris.go | 124 - .../x/crypto/ssh/terminal/util_windows.go | 103 - .../golang.org/x/crypto/ssh/testdata/doc.go | 8 
- .../golang.org/x/crypto/ssh/testdata/keys.go | 227 - .../golang.org/x/crypto/ssh/testdata_test.go | 63 - vendor/golang.org/x/crypto/ssh/transport.go | 353 - .../golang.org/x/crypto/ssh/transport_test.go | 113 - vendor/golang.org/x/crypto/tea/cipher.go | 108 - vendor/golang.org/x/crypto/tea/tea_test.go | 93 - vendor/golang.org/x/crypto/twofish/twofish.go | 342 - .../x/crypto/twofish/twofish_test.go | 129 - vendor/golang.org/x/crypto/xtea/block.go | 66 - vendor/golang.org/x/crypto/xtea/cipher.go | 82 - vendor/golang.org/x/crypto/xtea/xtea_test.go | 229 - vendor/golang.org/x/crypto/xts/xts.go | 145 - vendor/golang.org/x/crypto/xts/xts_test.go | 105 - vendor/golang.org/x/net/AUTHORS | 3 + vendor/golang.org/x/net/CONTRIBUTORS | 3 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/http2/hpack/encode.go | 240 + vendor/golang.org/x/net/http2/hpack/hpack.go | 504 + .../golang.org/x/net/http2/hpack/huffman.go | 229 + vendor/golang.org/x/net/http2/hpack/tables.go | 479 + .../golang.org/x/net/internal/socks/client.go | 168 + .../golang.org/x/net/internal/socks/socks.go | 317 + vendor/golang.org/x/net/proxy/dial.go | 54 + vendor/golang.org/x/net/proxy/direct.go | 31 + vendor/golang.org/x/net/proxy/per_host.go | 155 + vendor/golang.org/x/net/proxy/proxy.go | 149 + vendor/golang.org/x/net/proxy/socks5.go | 42 + vendor/golang.org/x/sys/.gitattributes | 10 - vendor/golang.org/x/sys/.gitignore | 2 - vendor/golang.org/x/sys/CONTRIBUTING.md | 26 - vendor/golang.org/x/sys/README.md | 18 - vendor/golang.org/x/sys/codereview.cfg | 1 - vendor/golang.org/x/sys/cpu/cpu.go | 38 - vendor/golang.org/x/sys/cpu/cpu_arm.go | 7 - vendor/golang.org/x/sys/cpu/cpu_arm64.go | 7 - vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 - vendor/golang.org/x/sys/cpu/cpu_gccgo.c | 43 - vendor/golang.org/x/sys/cpu/cpu_gccgo.go | 26 - vendor/golang.org/x/sys/cpu/cpu_s390x.go | 7 - vendor/golang.org/x/sys/cpu/cpu_test.go | 28 - 
vendor/golang.org/x/sys/cpu/cpu_x86.go | 55 - vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 - .../sys/internal/unsafeheader/unsafeheader.go | 30 + vendor/golang.org/x/sys/plan9/asm.s | 8 - vendor/golang.org/x/sys/plan9/const_plan9.go | 70 - vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 - vendor/golang.org/x/sys/plan9/env_plan9.go | 31 - vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 - vendor/golang.org/x/sys/plan9/mkall.sh | 138 - vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 - vendor/golang.org/x/sys/plan9/mksyscall.pl | 319 - .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 - .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 23 - vendor/golang.org/x/sys/plan9/race.go | 30 - vendor/golang.org/x/sys/plan9/race0.go | 25 - vendor/golang.org/x/sys/plan9/str.go | 22 - vendor/golang.org/x/sys/plan9/syscall.go | 77 - .../golang.org/x/sys/plan9/syscall_plan9.go | 349 - vendor/golang.org/x/sys/plan9/syscall_test.go | 33 - .../x/sys/plan9/zsyscall_plan9_386.go | 292 - .../x/sys/plan9/zsyscall_plan9_amd64.go | 292 - .../x/sys/plan9/zsyscall_plan9_arm.go | 284 - .../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 - vendor/golang.org/x/sys/unix/README.md | 35 +- .../golang.org/x/sys/unix/affinity_linux.go | 42 +- vendor/golang.org/x/sys/unix/aliases.go | 3 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 2 +- vendor/golang.org/x/sys/unix/asm_darwin_386.s | 2 +- .../golang.org/x/sys/unix/asm_darwin_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 2 +- .../golang.org/x/sys/unix/asm_darwin_arm64.s | 2 +- .../x/sys/unix/asm_dragonfly_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_arm.s | 2 +- .../asm_freebsd_arm64.s} | 19 +- vendor/golang.org/x/sys/unix/asm_linux_386.s | 2 +- .../golang.org/x/sys/unix/asm_linux_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_linux_arm.s | 2 +- 
.../golang.org/x/sys/unix/asm_linux_arm64.s | 2 +- .../golang.org/x/sys/unix/asm_linux_mips64x.s | 2 +- .../golang.org/x/sys/unix/asm_linux_mipsx.s | 2 +- .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 2 +- .../golang.org/x/sys/unix/asm_linux_riscv64.s | 47 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 2 +- vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_arm.s | 2 +- .../asm_openbsd_arm64.s} | 22 +- .../asm_openbsd_mips64.s} | 23 +- .../golang.org/x/sys/unix/asm_solaris_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 + .../golang.org/x/sys/unix/bluetooth_linux.go | 1 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 + vendor/golang.org/x/sys/unix/constants.go | 3 +- vendor/golang.org/x/sys/unix/creds_test.go | 134 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 4 +- vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 4 +- .../golang.org/x/sys/unix/dev_linux_test.go | 56 - vendor/golang.org/x/sys/unix/dev_zos.go | 29 + vendor/golang.org/x/sys/unix/dirent.go | 92 +- vendor/golang.org/x/sys/unix/endian_big.go | 3 +- vendor/golang.org/x/sys/unix/endian_little.go | 3 +- vendor/golang.org/x/sys/unix/env_unix.go | 3 +- vendor/golang.org/x/sys/unix/epoll_zos.go | 221 + .../x/sys/unix/errors_freebsd_386.go | 6 + .../x/sys/unix/errors_freebsd_amd64.go | 6 + .../x/sys/unix/errors_freebsd_arm64.go | 17 + vendor/golang.org/x/sys/unix/example_test.go | 30 - vendor/golang.org/x/sys/unix/export_test.go | 9 - vendor/golang.org/x/sys/unix/fcntl.go | 15 +- vendor/golang.org/x/sys/unix/fcntl_darwin.go | 24 + .../x/sys/unix/fcntl_linux_32bit.go | 5 +- vendor/golang.org/x/sys/unix/fdset.go | 30 + vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 + 
vendor/golang.org/x/sys/unix/gccgo.go | 6 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 6 + .../x/sys/unix/gccgo_linux_amd64.go | 1 + vendor/golang.org/x/sys/unix/ioctl.go | 51 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 + vendor/golang.org/x/sys/unix/linux/Dockerfile | 53 - vendor/golang.org/x/sys/unix/linux/mkall.go | 760 - .../golang.org/x/sys/unix/linux/mksysnum.pl | 85 - vendor/golang.org/x/sys/unix/linux/types.go | 1638 - vendor/golang.org/x/sys/unix/mkall.sh | 123 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 163 +- vendor/golang.org/x/sys/unix/mkpost.go | 102 - vendor/golang.org/x/sys/unix/mksyscall.pl | 341 - .../x/sys/unix/mksyscall_aix_ppc.pl | 384 - .../x/sys/unix/mksyscall_aix_ppc64.pl | 579 - .../x/sys/unix/mksyscall_solaris.pl | 294 - .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 265 - .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 - .../x/sys/unix/mksysnum_dragonfly.pl | 50 - .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 50 - .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 - .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 - .../golang.org/x/sys/unix/mmap_unix_test.go | 41 - vendor/golang.org/x/sys/unix/openbsd_test.go | 113 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 + .../{openbsd_pledge.go => pledge_openbsd.go} | 3 - vendor/golang.org/x/sys/unix/ptrace_darwin.go | 12 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 12 + vendor/golang.org/x/sys/unix/race.go | 1 + vendor/golang.org/x/sys/unix/race0.go | 3 +- .../x/sys/unix/readdirent_getdents.go | 13 + .../x/sys/unix/readdirent_getdirentries.go | 20 + .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 2 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 29 +- .../x/sys/unix/sockcmsg_unix_other.go | 47 + vendor/golang.org/x/sys/unix/str.go | 1 + vendor/golang.org/x/sys/unix/syscall.go | 47 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 134 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 24 +- .../x/sys/unix/syscall_aix_ppc64.go | 55 +- 
.../golang.org/x/sys/unix/syscall_aix_test.go | 162 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 80 +- .../golang.org/x/sys/unix/syscall_bsd_test.go | 89 - .../x/sys/unix/syscall_darwin.1_12.go | 32 + .../x/sys/unix/syscall_darwin.1_13.go | 108 + .../golang.org/x/sys/unix/syscall_darwin.go | 255 +- .../x/sys/unix/syscall_darwin_386.go | 45 +- .../x/sys/unix/syscall_darwin_amd64.go | 45 +- .../x/sys/unix/syscall_darwin_arm.go | 47 +- .../x/sys/unix/syscall_darwin_arm64.go | 45 +- .../x/sys/unix/syscall_darwin_libSystem.go | 34 + .../x/sys/unix/syscall_darwin_test.go | 63 - .../x/sys/unix/syscall_dragonfly.go | 122 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 5 + .../golang.org/x/sys/unix/syscall_freebsd.go | 205 +- .../x/sys/unix/syscall_freebsd_386.go | 15 + .../x/sys/unix/syscall_freebsd_amd64.go | 15 + .../x/sys/unix/syscall_freebsd_arm.go | 11 + .../x/sys/unix/syscall_freebsd_arm64.go | 63 + .../x/sys/unix/syscall_freebsd_test.go | 312 - .../golang.org/x/sys/unix/syscall_illumos.go | 78 + vendor/golang.org/x/sys/unix/syscall_linux.go | 971 +- .../x/sys/unix/syscall_linux_386.go | 21 +- .../x/sys/unix/syscall_linux_amd64.go | 32 +- .../x/sys/unix/syscall_linux_amd64_gc.go | 4 +- .../x/sys/unix/syscall_linux_arm.go | 54 +- .../x/sys/unix/syscall_linux_arm64.go | 53 +- .../golang.org/x/sys/unix/syscall_linux_gc.go | 3 +- .../x/sys/unix/syscall_linux_gc_386.go | 3 +- .../x/sys/unix/syscall_linux_gc_arm.go | 14 + .../x/sys/unix/syscall_linux_gccgo_386.go | 1 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 + .../x/sys/unix/syscall_linux_mips64x.go | 27 +- .../x/sys/unix/syscall_linux_mipsx.go | 20 +- .../x/sys/unix/syscall_linux_ppc64x.go | 16 +- .../x/sys/unix/syscall_linux_riscv64.go | 36 +- .../x/sys/unix/syscall_linux_s390x.go | 14 +- .../x/sys/unix/syscall_linux_sparc64.go | 16 +- .../x/sys/unix/syscall_linux_test.go | 484 - .../golang.org/x/sys/unix/syscall_netbsd.go | 133 +- .../x/sys/unix/syscall_netbsd_386.go | 5 + .../x/sys/unix/syscall_netbsd_amd64.go 
| 5 + .../x/sys/unix/syscall_netbsd_arm.go | 5 + .../x/sys/unix/syscall_netbsd_arm64.go | 38 + .../x/sys/unix/syscall_netbsd_test.go | 51 - .../golang.org/x/sys/unix/syscall_openbsd.go | 108 +- .../x/sys/unix/syscall_openbsd_386.go | 5 + .../x/sys/unix/syscall_openbsd_amd64.go | 5 + .../x/sys/unix/syscall_openbsd_arm.go | 5 + .../x/sys/unix/syscall_openbsd_arm64.go | 42 + .../x/sys/unix/syscall_openbsd_mips64.go | 35 + .../x/sys/unix/syscall_openbsd_test.go | 49 - .../golang.org/x/sys/unix/syscall_solaris.go | 81 +- .../x/sys/unix/syscall_solaris_amd64.go | 5 + .../x/sys/unix/syscall_solaris_test.go | 55 - vendor/golang.org/x/sys/unix/syscall_test.go | 60 - vendor/golang.org/x/sys/unix/syscall_unix.go | 87 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 5 +- .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 +- .../x/sys/unix/syscall_unix_test.go | 660 - .../x/sys/unix/syscall_zos_s390x.go | 1781 + vendor/golang.org/x/sys/unix/timestruct.go | 29 +- .../golang.org/x/sys/unix/timestruct_test.go | 54 - vendor/golang.org/x/sys/unix/types_aix.go | 236 - vendor/golang.org/x/sys/unix/types_darwin.go | 277 - .../golang.org/x/sys/unix/types_dragonfly.go | 263 - vendor/golang.org/x/sys/unix/types_freebsd.go | 356 - vendor/golang.org/x/sys/unix/types_netbsd.go | 289 - vendor/golang.org/x/sys/unix/types_openbsd.go | 276 - vendor/golang.org/x/sys/unix/types_solaris.go | 266 - .../{openbsd_unveil.go => unveil_openbsd.go} | 2 - vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 + vendor/golang.org/x/sys/unix/xattr_test.go | 207 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 15 +- .../x/sys/unix/zerrors_aix_ppc64.go | 17 +- .../x/sys/unix/zerrors_darwin_386.go | 8 +- .../x/sys/unix/zerrors_darwin_amd64.go | 70 +- .../x/sys/unix/zerrors_darwin_arm.go | 8 +- .../x/sys/unix/zerrors_darwin_arm64.go | 70 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 140 +- .../x/sys/unix/zerrors_freebsd_386.go | 176 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 174 +- .../x/sys/unix/zerrors_freebsd_arm.go | 
32 +- .../x/sys/unix/zerrors_freebsd_arm64.go | 1943 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 2803 + .../x/sys/unix/zerrors_linux_386.go | 2867 +- .../x/sys/unix/zerrors_linux_amd64.go | 2867 +- .../x/sys/unix/zerrors_linux_arm.go | 2879 +- .../x/sys/unix/zerrors_linux_arm64.go | 2855 +- .../x/sys/unix/zerrors_linux_mips.go | 2871 +- .../x/sys/unix/zerrors_linux_mips64.go | 2871 +- .../x/sys/unix/zerrors_linux_mips64le.go | 2871 +- .../x/sys/unix/zerrors_linux_mipsle.go | 2871 +- .../x/sys/unix/zerrors_linux_ppc64.go | 2988 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 2988 +- .../x/sys/unix/zerrors_linux_riscv64.go | 2841 +- .../x/sys/unix/zerrors_linux_s390x.go | 2987 +- .../x/sys/unix/zerrors_linux_sparc64.go | 1682 +- .../x/sys/unix/zerrors_netbsd_386.go | 10 +- .../x/sys/unix/zerrors_netbsd_amd64.go | 10 +- .../x/sys/unix/zerrors_netbsd_arm.go | 10 +- .../x/sys/unix/zerrors_netbsd_arm64.go | 1770 + .../x/sys/unix/zerrors_openbsd_386.go | 25 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 14 +- .../x/sys/unix/zerrors_openbsd_arm.go | 19 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 1798 + .../x/sys/unix/zerrors_openbsd_mips64.go | 1863 + .../x/sys/unix/zerrors_solaris_amd64.go | 26 +- .../x/sys/unix/zerrors_zos_s390x.go | 831 + ...acearm_linux.go => zptrace_armnn_linux.go} | 3 +- .../x/sys/unix/zptrace_linux_arm64.go | 17 + ...emips_linux.go => zptrace_mipsnn_linux.go} | 3 +- ...sle_linux.go => zptrace_mipsnnle_linux.go} | 3 +- ...trace386_linux.go => zptrace_x86_linux.go} | 3 +- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 55 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 51 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 60 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 50 +- .../x/sys/unix/zsyscall_darwin_386.1_13.go | 40 + .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_386.go | 1132 +- .../x/sys/unix/zsyscall_darwin_386.s | 290 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.s 
| 12 + .../x/sys/unix/zsyscall_darwin_amd64.go | 1132 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 290 + .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 40 + .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm.go | 1118 +- .../x/sys/unix/zsyscall_darwin_arm.s | 288 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm64.go | 1132 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 290 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 100 +- .../x/sys/unix/zsyscall_freebsd_386.go | 65 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 63 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 63 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 2016 + .../x/sys/unix/zsyscall_illumos_amd64.go | 102 + .../golang.org/x/sys/unix/zsyscall_linux.go | 1944 + .../x/sys/unix/zsyscall_linux_386.go | 1669 +- .../x/sys/unix/zsyscall_linux_amd64.go | 1686 +- .../x/sys/unix/zsyscall_linux_arm.go | 1688 +- .../x/sys/unix/zsyscall_linux_arm64.go | 1670 +- .../x/sys/unix/zsyscall_linux_mips.go | 1669 +- .../x/sys/unix/zsyscall_linux_mips64.go | 1687 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 1687 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 1669 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 1669 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 1669 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 1646 +- .../x/sys/unix/zsyscall_linux_s390x.go | 1669 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 1497 +- .../x/sys/unix/zsyscall_netbsd_386.go | 98 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 98 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 98 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 1862 + .../x/sys/unix/zsyscall_openbsd_386.go | 67 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 67 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 67 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 1693 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 1693 + .../x/sys/unix/zsyscall_solaris_amd64.go | 35 +- 
.../x/sys/unix/zsyscall_zos_s390x.go | 1217 + .../x/sys/unix/zsysctl_openbsd_386.go | 6 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 4 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 6 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 276 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 280 + .../x/sys/unix/zsysnum_darwin_386.go | 4 +- .../x/sys/unix/zsysnum_darwin_amd64.go | 8 +- .../x/sys/unix/zsysnum_darwin_arm.go | 4 +- .../x/sys/unix/zsysnum_darwin_arm64.go | 4 +- .../x/sys/unix/zsysnum_dragonfly_amd64.go | 378 +- .../x/sys/unix/zsysnum_freebsd_386.go | 456 +- .../x/sys/unix/zsysnum_freebsd_amd64.go | 456 +- .../x/sys/unix/zsysnum_freebsd_arm.go | 456 +- .../x/sys/unix/zsysnum_freebsd_arm64.go | 397 + .../x/sys/unix/zsysnum_linux_386.go | 817 +- .../x/sys/unix/zsysnum_linux_amd64.go | 21 +- .../x/sys/unix/zsysnum_linux_arm.go | 753 +- .../x/sys/unix/zsysnum_linux_arm64.go | 22 +- .../x/sys/unix/zsysnum_linux_mips.go | 787 +- .../x/sys/unix/zsysnum_linux_mips64.go | 21 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 21 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 787 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 32 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 32 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 22 +- .../x/sys/unix/zsysnum_linux_s390x.go | 35 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 40 +- .../x/sys/unix/zsysnum_netbsd_386.go | 5 +- .../x/sys/unix/zsysnum_netbsd_amd64.go | 5 +- .../x/sys/unix/zsysnum_netbsd_arm.go | 5 +- .../x/sys/unix/zsysnum_netbsd_arm64.go | 275 + .../x/sys/unix/zsysnum_openbsd_386.go | 217 +- .../x/sys/unix/zsysnum_openbsd_amd64.go | 217 +- .../x/sys/unix/zsysnum_openbsd_arm.go | 217 +- .../x/sys/unix/zsysnum_openbsd_arm64.go | 218 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 221 + .../x/sys/unix/zsysnum_zos_s390x.go | 2670 + .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 47 +- .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 52 +- .../x/sys/unix/ztypes_darwin_386.go | 80 +- .../x/sys/unix/ztypes_darwin_amd64.go | 99 +- 
.../x/sys/unix/ztypes_darwin_arm.go | 87 +- .../x/sys/unix/ztypes_darwin_arm64.go | 99 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 92 +- .../x/sys/unix/ztypes_freebsd_386.go | 229 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 229 +- .../x/sys/unix/ztypes_freebsd_arm.go | 194 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 701 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 3722 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 1732 +- .../x/sys/unix/ztypes_linux_amd64.go | 1746 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1739 +- .../x/sys/unix/ztypes_linux_arm64.go | 1746 +- .../x/sys/unix/ztypes_linux_mips.go | 1736 +- .../x/sys/unix/ztypes_linux_mips64.go | 1747 +- .../x/sys/unix/ztypes_linux_mips64le.go | 1747 +- .../x/sys/unix/ztypes_linux_mipsle.go | 1736 +- .../x/sys/unix/ztypes_linux_ppc64.go | 1746 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 1746 +- .../x/sys/unix/ztypes_linux_riscv64.go | 1747 +- .../x/sys/unix/ztypes_linux_s390x.go | 1751 +- .../x/sys/unix/ztypes_linux_sparc64.go | 954 +- .../x/sys/unix/ztypes_netbsd_386.go | 69 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 76 +- .../x/sys/unix/ztypes_netbsd_arm.go | 75 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 508 + .../x/sys/unix/ztypes_openbsd_386.go | 13 + .../x/sys/unix/ztypes_openbsd_amd64.go | 13 + .../x/sys/unix/ztypes_openbsd_arm.go | 115 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 567 + .../x/sys/unix/ztypes_openbsd_mips64.go | 567 + .../x/sys/unix/ztypes_solaris_amd64.go | 40 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 402 + .../x/sys/windows/asm_windows_386.s | 13 - .../x/sys/windows/asm_windows_amd64.s | 13 - .../x/sys/windows/asm_windows_arm.s | 11 - .../golang.org/x/sys/windows/dll_windows.go | 60 +- vendor/golang.org/x/sys/windows/empty.s | 8 + .../golang.org/x/sys/windows/env_windows.go | 27 +- .../golang.org/x/sys/windows/exec_windows.go | 35 + .../x/sys/windows/memory_windows.go | 25 +- vendor/golang.org/x/sys/windows/mkerrors.bash | 70 + .../x/sys/windows/mkknownfolderids.bash 
| 27 + vendor/golang.org/x/sys/windows/mksyscall.go | 4 +- .../x/sys/windows/registry/export_test.go | 11 - .../x/sys/windows/registry/mksyscall.go | 4 +- .../x/sys/windows/registry/registry_test.go | 756 - .../x/sys/windows/registry/value.go | 20 +- .../sys/windows/registry/zsyscall_windows.go | 39 +- .../x/sys/windows/security_windows.go | 1023 +- vendor/golang.org/x/sys/windows/service.go | 82 +- .../x/sys/windows/setupapierrors_windows.go | 100 + .../golang.org/x/sys/windows/svc/debug/log.go | 56 - .../x/sys/windows/svc/debug/service.go | 45 - vendor/golang.org/x/sys/windows/svc/event.go | 48 - .../x/sys/windows/svc/eventlog/log_test.go | 51 - .../x/sys/windows/svc/example/beep.go | 22 - .../x/sys/windows/svc/example/install.go | 92 - .../x/sys/windows/svc/example/main.go | 76 - .../x/sys/windows/svc/example/manage.go | 62 - .../x/sys/windows/svc/example/service.go | 84 - vendor/golang.org/x/sys/windows/svc/go12.c | 24 - vendor/golang.org/x/sys/windows/svc/go12.go | 11 - vendor/golang.org/x/sys/windows/svc/go13.go | 31 - .../x/sys/windows/svc/mgr/config.go | 145 - .../golang.org/x/sys/windows/svc/mgr/mgr.go | 162 - .../x/sys/windows/svc/mgr/mgr_test.go | 282 - .../x/sys/windows/svc/mgr/recovery.go | 135 - .../x/sys/windows/svc/mgr/service.go | 72 - .../golang.org/x/sys/windows/svc/security.go | 62 - .../golang.org/x/sys/windows/svc/service.go | 363 - .../golang.org/x/sys/windows/svc/svc_test.go | 135 - vendor/golang.org/x/sys/windows/svc/sys_386.s | 68 - .../golang.org/x/sys/windows/svc/sys_amd64.s | 42 - vendor/golang.org/x/sys/windows/svc/sys_arm.s | 38 - vendor/golang.org/x/sys/windows/syscall.go | 46 +- .../golang.org/x/sys/windows/syscall_test.go | 53 - .../x/sys/windows/syscall_windows.go | 552 +- .../x/sys/windows/syscall_windows_test.go | 113 - .../golang.org/x/sys/windows/types_windows.go | 1467 +- .../x/sys/windows/types_windows_386.go | 13 + .../x/sys/windows/types_windows_amd64.go | 12 + .../x/sys/windows/types_windows_arm.go | 13 + 
.../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 9468 ++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 4491 +- vendor/golang.org/x/text/AUTHORS | 3 + vendor/golang.org/x/text/CONTRIBUTORS | 3 + vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + .../golang.org/x/text/transform/transform.go | 709 + .../x/text/unicode/norm/composition.go | 512 + .../x/text/unicode/norm/forminfo.go | 278 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 458 + .../x/text/unicode/norm/normalize.go | 609 + .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7657 ++ .../x/text/unicode/norm/tables11.0.0.go | 7693 ++ .../x/text/unicode/norm/tables12.0.0.go | 7710 ++ .../x/text/unicode/norm/tables13.0.0.go | 7760 ++ .../x/text/unicode/norm/tables9.0.0.go | 7637 ++ .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + vendor/google.golang.org/protobuf/AUTHORS | 3 + .../google.golang.org/protobuf/CONTRIBUTORS | 3 + vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/prototext/decode.go | 791 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 433 + .../protobuf/encoding/protowire/wire.go | 538 + .../protobuf/internal/descfmt/stringer.go | 316 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 61 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 258 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + 
.../protobuf/internal/encoding/text/encode.go | 267 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 39 + .../protobuf/internal/errors/is_go113.go | 12 + .../protobuf/internal/fieldsort/fieldsort.go | 40 + .../protobuf/internal/filedesc/build.go | 155 + .../protobuf/internal/filedesc/desc.go | 614 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 282 + .../internal/filedesc/desc_list_gen.go | 345 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go} | 6 +- .../internal/flags/proto_legacy_enable.go} | 6 +- .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 828 + .../protobuf/internal/impl/codec_gen.go | 5637 + .../protobuf/internal/impl/codec_map.go | 389 + .../protobuf/internal/impl/codec_map_go111.go | 37 + .../protobuf/internal/impl/codec_map_go112.go | 11 + .../protobuf/internal/impl/codec_message.go | 159 + 
.../internal/impl/codec_messageset.go | 120 + .../protobuf/internal/impl/codec_reflect.go | 209 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 17 + .../protobuf/internal/impl/convert.go | 467 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 274 + .../protobuf/internal/impl/encode.go | 199 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 175 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 502 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 215 + .../protobuf/internal/impl/message_reflect.go | 364 + .../internal/impl/message_reflect_field.go | 466 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 177 + .../protobuf/internal/impl/pointer_unsafe.go | 173 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/mapsort/mapsort.go | 43 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 27 + .../protobuf/internal/strs/strings_unsafe.go | 94 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 274 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 346 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 154 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + 
.../protobuf/proto/messageset.go | 88 + .../google.golang.org/protobuf/proto/proto.go | 34 + .../protobuf/proto/proto_methods.go | 19 + .../protobuf/proto/proto_reflect.go | 19 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protoreflect/methods.go | 77 + .../protobuf/reflect/protoreflect/proto.go | 504 + .../protobuf/reflect/protoreflect/source.go | 52 + .../protobuf/reflect/protoreflect/type.go | 631 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 59 + .../reflect/protoreflect/value_union.go | 411 + .../reflect/protoreflect/value_unsafe.go | 98 + .../reflect/protoregistry/registry.go | 800 + .../protobuf/runtime/protoiface/legacy.go} | 12 +- .../protobuf/runtime/protoiface/methods.go | 167 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../protobuf/types/known/anypb/any.pb.go | 494 + .../types/known/durationpb/duration.pb.go | 379 + .../types/known/timestamppb/timestamp.pb.go | 381 + .../kingpin.v2/_examples/chat1/main.go | 20 - .../kingpin.v2/_examples/chat2/main.go | 38 - .../kingpin.v2/_examples/completion/main.go | 96 - .../kingpin.v2/_examples/curl/main.go | 105 - .../kingpin.v2/_examples/modular/main.go | 30 - .../kingpin.v2/_examples/ping/main.go | 20 - .../alecthomas/kingpin.v2/app_test.go | 404 - .../alecthomas/kingpin.v2/args_test.go | 84 - .../kingpin.v2/cmd/genvalues/main.go | 134 - .../alecthomas/kingpin.v2/cmd_test.go | 374 - .../alecthomas/kingpin.v2/completions_test.go | 78 - .../alecthomas/kingpin.v2/examples_test.go | 46 - .../alecthomas/kingpin.v2/flags_test.go | 368 - .../alecthomas/kingpin.v2/parser_test.go | 122 - .../alecthomas/kingpin.v2/parsers_test.go | 98 - .../alecthomas/kingpin.v2/usage_test.go | 65 - .../alecthomas/kingpin.v2/values_test.go | 98 - vendor/modules.txt | 182 + 2569 files 
changed, 221214 insertions(+), 419872 deletions(-) delete mode 100644 original.log delete mode 100644 vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md delete mode 100644 vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/Shopify/sarama/.golangci.yml delete mode 100644 vendor/github.com/Shopify/sarama/.travis.yml delete mode 100644 vendor/github.com/Shopify/sarama/acl_create_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/acl_create_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/acl_delete_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/acl_delete_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/acl_describe_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/acl_describe_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/admin_test.go delete mode 100644 vendor/github.com/Shopify/sarama/alter_configs_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/alter_configs_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go create mode 100644 vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/api_versions_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/async_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/balance_strategy_test.go delete mode 100644 
vendor/github.com/Shopify/sarama/broker_test.go delete mode 100644 vendor/github.com/Shopify/sarama/client_test.go delete mode 100644 vendor/github.com/Shopify/sarama/client_tls_test.go create mode 100644 vendor/github.com/Shopify/sarama/compress.go delete mode 100644 vendor/github.com/Shopify/sarama/config_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_group_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/consumer_test.go create mode 100644 vendor/github.com/Shopify/sarama/control_record.go delete mode 100644 vendor/github.com/Shopify/sarama/create_partitions_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/create_partitions_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/create_topics_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/create_topics_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/decompress.go delete mode 100644 vendor/github.com/Shopify/sarama/delete_groups_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/delete_groups_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/delete_records_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/delete_records_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/delete_topics_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/delete_topics_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_configs_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_configs_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request_test.go delete mode 100644 
vendor/github.com/Shopify/sarama/describe_groups_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/describe_log_dirs_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_log_dirs_response.go create mode 100644 vendor/github.com/Shopify/sarama/docker-compose.yml delete mode 100644 vendor/github.com/Shopify/sarama/end_txn_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/end_txn_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/examples/README.md delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/README.md delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/http_server.go delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go delete mode 100644 vendor/github.com/Shopify/sarama/fetch_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/fetch_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_client_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_consumer_group_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_consumer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_offset_manager_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/functional_test.go create mode 100644 vendor/github.com/Shopify/sarama/go.mod create mode 100644 vendor/github.com/Shopify/sarama/go.sum create mode 100644 vendor/github.com/Shopify/sarama/gssapi_kerberos.go delete mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request_test.go delete mode 100644 
vendor/github.com/Shopify/sarama/heartbeat_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/interceptors.go delete mode 100644 vendor/github.com/Shopify/sarama/join_group_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/join_group_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/kerberos_client.go delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/leave_group_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go create mode 100644 vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go delete mode 100644 vendor/github.com/Shopify/sarama/message_test.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/metrics_test.go create mode 100644 vendor/github.com/Shopify/sarama/mockkerberos.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/README.md delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/mocks.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer.go delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go delete mode 
100644 vendor/github.com/Shopify/sarama/offset_commit_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_manager_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/offset_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/partitioner_test.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/produce_set_test.go delete mode 100644 vendor/github.com/Shopify/sarama/record_test.go delete mode 100644 vendor/github.com/Shopify/sarama/records_test.go delete mode 100644 vendor/github.com/Shopify/sarama/request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/response_header_test.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_authenticate_request.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_authenticate_response.go delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go create mode 100644 vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_group_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/sync_producer_test.go delete mode 100644 vendor/github.com/Shopify/sarama/tools/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore delete mode 100644 
vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go delete mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_request_test.go delete mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_response_test.go delete mode 100644 vendor/github.com/Shopify/sarama/utils_test.go delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/create_topics.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/halt_cluster.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/kafka.conf delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/provision.sh delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/server.properties delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/setup_services.sh delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties create mode 100644 
vendor/github.com/Shopify/sarama/zstd.go delete mode 100644 vendor/github.com/alecthomas/template/example_test.go delete mode 100644 vendor/github.com/alecthomas/template/examplefiles_test.go delete mode 100644 vendor/github.com/alecthomas/template/examplefunc_test.go delete mode 100644 vendor/github.com/alecthomas/template/exec_test.go create mode 100644 vendor/github.com/alecthomas/template/go.mod delete mode 100644 vendor/github.com/alecthomas/template/multi_test.go delete mode 100644 vendor/github.com/alecthomas/template/parse/lex_test.go delete mode 100644 vendor/github.com/alecthomas/template/parse/parse_test.go delete mode 100644 vendor/github.com/alecthomas/template/testdata/file1.tmpl delete mode 100644 vendor/github.com/alecthomas/template/testdata/file2.tmpl delete mode 100644 vendor/github.com/alecthomas/template/testdata/tmpl1.tmpl delete mode 100644 vendor/github.com/alecthomas/template/testdata/tmpl2.tmpl delete mode 100644 vendor/github.com/alecthomas/units/bytes_test.go create mode 100644 vendor/github.com/alecthomas/units/go.mod create mode 100644 vendor/github.com/alecthomas/units/go.sum delete mode 100644 vendor/github.com/beorn7/perks/.gitignore delete mode 100644 vendor/github.com/beorn7/perks/README.md delete mode 100644 vendor/github.com/beorn7/perks/histogram/bench_test.go delete mode 100644 vendor/github.com/beorn7/perks/histogram/histogram.go delete mode 100644 vendor/github.com/beorn7/perks/histogram/histogram_test.go delete mode 100644 vendor/github.com/beorn7/perks/quantile/bench_test.go delete mode 100644 vendor/github.com/beorn7/perks/quantile/example_test.go delete mode 100644 vendor/github.com/beorn7/perks/quantile/stream_test.go delete mode 100644 vendor/github.com/beorn7/perks/topk/topk.go delete mode 100644 vendor/github.com/beorn7/perks/topk/topk_test.go create mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 
vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/go.mod rename vendor/github.com/{eapache/go-xerial-snappy/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 => cespare/xxhash/v2/go.sum} (100%) create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go delete mode 100644 vendor/github.com/davecgh/go-spew/.gitignore delete mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml delete mode 100644 vendor/github.com/davecgh/go-spew/README.md delete mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go delete mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt delete mode 100644 vendor/github.com/eapache/go-resiliency/.gitignore delete mode 100644 vendor/github.com/eapache/go-resiliency/.travis.yml delete mode 100644 vendor/github.com/eapache/go-resiliency/CHANGELOG.md delete mode 100644 
vendor/github.com/eapache/go-resiliency/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/batcher/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/batcher/batcher.go delete mode 100644 vendor/github.com/eapache/go-resiliency/batcher/batcher_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/deadline/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/deadline/deadline.go delete mode 100644 vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/backoffs.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/classifier.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/retrier.go delete mode 100644 vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go delete mode 100644 vendor/github.com/eapache/go-resiliency/semaphore/README.md delete mode 100644 vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go delete mode 100644 vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/020dfb19a68cbcf99dc93dc1030068d4c9968ad0-2 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/05979b224be0294bf350310d4ba5257c9bb815db-3 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/361a1c6d2a8f80780826c3d83ad391d0475c922f-4 delete mode 100644 
vendor/github.com/eapache/go-xerial-snappy/corpus/4117af68228fa64339d362cf980c68ffadff96c8-12 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/4142249be82c8a617cf838eef05394ece39becd3-9 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/41ea8c7d904f1cd913b52e9ead4a96c639d76802-10 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/44083e1447694980c0ee682576e32358c9ee883f-2 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/4d6b359bd538feaa7d36c89235d07d0a443797ac-1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/521e7e67b6063a75e0eeb24b0d1dd20731d34ad8-4 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/526e6f85d1b8777f0d9f70634c9f8b77fbdccdff-7 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/581b8fe7088f921567811fdf30e1f527c9f48e5e delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/60cd10738158020f5843b43960158c3d116b3a71-11 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/652b031b4b9d601235f86ef62523e63d733b8623-3 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/684a011f6fdfc7ae9863e12381165e82d2a2e356-9 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/72e42fc8e5eaed6a8a077f420fc3bd1f9a7c0919-1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/80881d1b911b95e0203b3b0e7dc6360c35f7620f-7 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/8484b3082d522e0a1f315db1fa1b2a5118be7cc3-8 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/9635bb09260f100bc4a2ee4e3b980fecc5b874ce-1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/99d36b0b5b1be7151a508dd440ec725a2576c41c-1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/9d339eddb4e2714ea319c3fb571311cb95fdb067-6 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/b2419fcb7a9aef359de67cb6bd2b8a8c1f5c100f-4 
delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/c1951b29109ec1017f63535ce3699630f46f54e1-5 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/cb806bc4f67316af02d6ae677332a3b6005a18da-5 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/cd7dd228703739e9252c7ea76f1c5f82ab44686a-10 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/ce3671e91907349cea04fc3f2a4b91c65b99461d-3 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/ce3c6f4c31f74d72fbf74c17d14a8d29aa62059e-6 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/e2230aa0ecaebb9b890440effa13f501a89247b2-1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/efa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/f0445ac39e03978bbc8011316ac8468015ddb72c-1 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/corpus/f241da53c6bc1fe3368c55bf28db86ce15a2c784-2 delete mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy_test.go delete mode 100644 vendor/github.com/eapache/queue/queue_test.go delete mode 100644 vendor/github.com/golang/protobuf/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 vendor/github.com/golang/protobuf/.github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 vendor/github.com/golang/protobuf/.github/ISSUE_TEMPLATE/question.md delete mode 100644 vendor/github.com/golang/protobuf/.gitignore delete mode 100644 vendor/github.com/golang/protobuf/.travis.yml delete mode 100644 vendor/github.com/golang/protobuf/Makefile delete mode 100644 vendor/github.com/golang/protobuf/README.md delete mode 100644 vendor/github.com/golang/protobuf/conformance/Makefile delete mode 100644 vendor/github.com/golang/protobuf/conformance/conformance.go delete mode 100755 vendor/github.com/golang/protobuf/conformance/conformance.sh delete mode 100644 vendor/github.com/golang/protobuf/conformance/failure_list_go.txt 
delete mode 100644 vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go delete mode 100644 vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto delete mode 100755 vendor/github.com/golang/protobuf/conformance/test.sh delete mode 100644 vendor/github.com/golang/protobuf/descriptor/descriptor.go delete mode 100644 vendor/github.com/golang/protobuf/descriptor/descriptor_test.go delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto delete mode 100644 vendor/github.com/golang/protobuf/proto/all_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/any_test.go create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode_test.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/golang/protobuf/proto/discard_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal_test.go 
delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/map_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go delete mode 100644 vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go delete mode 100644 vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto delete mode 100644 vendor/github.com/golang/protobuf/proto/proto3_test.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/size2_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/size_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go delete mode 100644 vendor/github.com/golang/protobuf/proto/test_proto/test.proto delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser_test.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_test.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 
vendor/github.com/golang/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/doc.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/main.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto delete mode 100644 
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto delete mode 100644 
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto delete mode 100644 
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any_test.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration_test.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp_test.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto delete mode 100755 vendor/github.com/golang/protobuf/regenerate.sh delete mode 100644 vendor/github.com/golang/snappy/cmd/snappytool/main.go create mode 100644 vendor/github.com/golang/snappy/decode_arm64.s create mode 100644 vendor/github.com/golang/snappy/decode_asm.go create mode 100644 vendor/github.com/golang/snappy/encode_arm64.s create mode 100644 vendor/github.com/golang/snappy/encode_asm.go create mode 100644 vendor/github.com/golang/snappy/go.mod delete mode 100644 
vendor/github.com/golang/snappy/golden_test.go delete mode 100644 vendor/github.com/golang/snappy/misc/main.cpp delete mode 100644 vendor/github.com/golang/snappy/snappy_test.go delete mode 100644 vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt delete mode 100644 vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy create mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE create mode 100644 vendor/github.com/hashicorp/go-uuid/README.md create mode 100644 vendor/github.com/hashicorp/go-uuid/go.mod create mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go create mode 100644 vendor/github.com/jcmturner/aescts/v2/LICENSE create mode 100644 vendor/github.com/jcmturner/aescts/v2/aescts.go create mode 100644 vendor/github.com/jcmturner/aescts/v2/go.mod create mode 100644 vendor/github.com/jcmturner/aescts/v2/go.sum create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/LICENSE create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/go.mod create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/go.sum create mode 100644 vendor/github.com/jcmturner/dnsutils/v2/srv.go create mode 100644 vendor/github.com/jcmturner/gofork/LICENSE create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/README.md create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/common.go create mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go create mode 100644 vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/LICENSE create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go create mode 100644 
vendor/github.com/jcmturner/gokrb5/v8/client/cache.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/client.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/network.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/session.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/client/settings.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/config/error.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go create mode 100644 
vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go create mode 100644 
vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go 
create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/PAData.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/PrincipalName.go create mode 100644 vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/LICENSE create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/common.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/error.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/header.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/strings.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/tags.go create mode 100644 vendor/github.com/jcmturner/rpc/v2/ndr/union.go create 
mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/snappy/.gitignore create mode 100644 vendor/github.com/klauspost/compress/snappy/AUTHORS create mode 100644 vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/klauspost/compress/snappy/LICENSE create mode 100644 vendor/github.com/klauspost/compress/snappy/README create mode 100644 vendor/github.com/klauspost/compress/snappy/decode.go rename vendor/github.com/{golang => klauspost/compress}/snappy/decode_amd64.go (100%) create mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.s create mode 100644 vendor/github.com/klauspost/compress/snappy/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/snappy/encode.go rename vendor/github.com/{golang => klauspost/compress}/snappy/encode_amd64.go (100%) create 
mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.s create mode 100644 vendor/github.com/klauspost/compress/snappy/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/snappy/runbench.cmd create mode 100644 vendor/github.com/klauspost/compress/snappy/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 
vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go create mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go delete mode 100644 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_test.go delete mode 100644 vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go delete mode 100644 vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go delete mode 100644 vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go delete mode 100644 vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go delete mode 100644 vendor/github.com/krallistic/kazoo-go/kazoo_test.go delete mode 100644 vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore delete mode 100644 vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go delete mode 
100644 vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/README.md delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/ext/moved.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto delete mode 100644 vendor/github.com/pierrec/lz4/bench_test.go delete mode 100644 vendor/github.com/pierrec/lz4/block_test.go create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s create mode 100644 vendor/github.com/pierrec/lz4/decode_other.go create mode 100644 vendor/github.com/pierrec/lz4/errors.go delete mode 100644 vendor/github.com/pierrec/lz4/export_test.go delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/01572067d493db8dc8161f05c339a5192b0b4087-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/02766f768fbfbd81b752cce427eb5242a44929cc-5 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/032f04032e12567057782672bb12670c20d38439-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0367b985641aca66e6e4eeea68acf5e2a02c62a8-16 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/03e85abc49352b2f7cc83efd7e4274da02d78b84-6 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/049f82a81bb6b4d7cf69fac5e413f6ce299d48cf-8 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/04c05c7956f17e57a91a47909bd0706135cf17a6-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/050e2af2a57d8044139ba21375f0ac6fcb7ab0b1-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0519d86e62cc577b98e9a4836b071ba1692c7674-30 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/0547c73efb9b6a345fd9a52aa0798b48dd9aca62-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/05aae2cf8756f66066cf623618042ebaa92ec745-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0608f9eba5e6fd4d70241a81a6950ca51d78eb64-33 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/07fe3e792f0d2862dccc04db22c0e4aef4d41b49-6 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0990ac54decbca1a97893e83c7feb2be89cb10ea-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/09f2eda28ecc97304659afded4d13a188baf2107-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0a4ff2ab3a01888686c5bc358b72be108bbb4721-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0a7fddf3c8aa1c781223748129c9dc0807de3a6b-28 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/0b5bec228930b2cfcda3be9a39107a6bc8044f1e-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0ca5fd3841a6777873c7ef26f65a384e7b15d065-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0ce9c3bac93df0ea1f6343d223d5220f9eb2383a-8 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0cf885cd35e7124005b0ba0c3c4431ddfaeff84d-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0d7c02d4e91d82b0355baaca1237062639442db6-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/0e1b2b0c49dfb86fe01d3453dd24e39482e132e8-7 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/1.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/10.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/106b9d718c97bb7c872847d3070a570e99d9fa3e-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/10fa5d9f0fe75f73c0e92a1fe1c00f0041ec8f39-24 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/11.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/113a12cbb28b83fcee714d58c35bbf52c0740e90-7 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/12.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1288161f8ce422490f63f257ce7338ef90fb8827-15 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/13.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/136f7224ae337a61df2e72b80af8b1aaa5933af3-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/13c3c26f7a34d01fc89c92ca8ba2ba5ae430c225-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/13db64707d1ea3070b4a37b6c1291d6125acbbd3-10 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/14.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/14193748a7b6cda204b11d042a35635151e90dbb-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/142d4f8cb427dd3562d72d889dfc0ea3a2b03d98-22 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/15.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/15663b854e9a4f193502ea6463dae38b4d8fca90-19 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/15e223354eb5378a7ee74a41dfab28ffc895ca33-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/16.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/17.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/177c1c68fead4507aa47dd2455fd17a10ceda5ea-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/17871030a73ac4d12ada652948135cb4639d679c-34 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/18.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/180a2772b126d31abcb3ef692a14b13cf47f103e-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/19.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/191e0dd24b8c7f8babeae4839768df39acc17eb1-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1971e6ed6c6f6069fc2a9ed3038101e89bbcc381-26 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/1a582381781f264f551bd6f0f2284a931147e6d9-4 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1a58f02dc83ac8315a85babdea6d757cbff2bb03-30 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1a5a08b67764facaad851b9f1cbc5cfb31b7fb56-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1c2781a1ffae4059ce3e93a55ec8d8cbf8bdecdf-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1c944d5065b1a2b30e412604a14aa52565a5765b-35 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1d37fb332301cf7de0bd51a8c1aa9be4935e89fc-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1d6b87b52e62cb84be834478ad88129f5e1f247b-9 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1ec2f11a8d8b9cf188a58f673a0b4a8608a926ca-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/1fc2ba0bb981fec47badea1c80219452c9e3c76c-22 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/1fd8444ac43541c44a1c6ed8df2f688b1fa09681-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/2.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/20.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/202a9c8b188cae90f29bce3bf0438a035c504eb4-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2065ba3177c7dc5047742faa7158b3faeaac1f3c-32 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/20cf0057443ecb322ff1169ecbe6cf20250f15af-13 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/20d1a26afe563ad77e7a95fbee6ff59ebf3e61ab-13 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/21.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/21c8be1bb9eeea5b141500dee4987ab7fbd40d4a-23 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/22.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/2201e32d052c15874f0323a09c330f3666029a72-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/226780b32ba8f87ec614fdb376aa0884011c4ca9-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/22897c61698649d7570de91613afdc19b66e6965-20 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/23.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/234cc427d9be32470f3c2e11a6bc16567f558e55-22 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/24.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2486a84bf0f161f45b050d9c19ea9e35f5def864-8 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/25.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/25252b16cd4afa8ef86122448688c7095684c86b-12 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/26.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/263fb3d738b862ec4050e5a9fbabfbd99cb0d9a5-16 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/27.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/276580343a14eec04143e89a778dae3e14df472c-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/27fb5dc4016dc640e55a60719a222c38c604fa6b-2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/28.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/29.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/2a08d7c56ff9959698688f19ddd2e1e4d4651270-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2a33d8514fb512aa20b0a56800cd3e12f3952b6b-26 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/2a52400dd3aa2d2a40657d1e51c47c1929912927-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2ab005ac79cd4dada693dd2a747c001898d45e1e-16 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/2b39aa66ecfac58e61185c9664a968233931496a-9 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2c2a5947341d76797a7e2299f39d01e3aebb2eb8-19 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2cc2308b75a2e8f7eafcf69370767e5fce314892-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2cdafdadb156e2759c389b6b8edf6a402034886c-26 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/2d7f0171116eec9984eaa9138e1312e90a7d67ee-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2de93224b5f0db491ced1ec491a9f41d71820671-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2e8487cf61feda70c0d74f12bfb5b692b684f82a-9 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2f0ee9cf4bb951a37efc6460d5709442bc3de54e-6 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2f1ba7fe1cd90a4023706a2ea9c7c9dca8128119-30 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/2fad20024167a500cdb8df5334a614f113efae00-20 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/3.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/30.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/300579a548d96d64c9da8470efa15e787f1a36f1-28 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/31.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/31c6c22708d346ed9e936fa7e77c8d9ab6da8d1e-33 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/32.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/33.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/34.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/344d38ec2ec90cb617e809439938b4cbf3b11f02-10 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/35.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/352631eab692c4a2c378b231fb3407ebcc0c3039-33 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/36.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/363d4559cac10516289fe1b6029590c4c7a6d8eb-5 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/37.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/3771c6e8ea0f20350dae0180a9b14e36b8aef244-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/37ee7fab504f2d2039753d73dd0290c884bd57bf-8 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/38.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/39.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/396101a712463bb336a18f4096fc3eb5923600c1-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/396146e06d3a4b2468d080f89ab5862348073424-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/397127b75cb59b253ed49206082b0428b6b23d02-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/39ccf446395ef707cf92a04b5508deda399372c2-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/3b6fd6da48bb34284390a75e22940e7234dbbd28-34 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/3de3c5c394a3cf05620bb80871a1f10e9e36f25b-8 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/3dee65f1cf51dfe2e5be498150ce22d2ac5a07fd-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/3e34341fb51769fd9d948bdd20c011e335b145f4-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/3ee211efb3d5d8058cd9a8c59e40c8d0f7a3df53-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/4.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/40.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/405726718b3f54a0cfae1666f06d3cc1ee747104-14 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/407188676d45d6f9dd5f3c84e7df0e763c7cca57-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/408ac1a4a83e082e848c208eed903930d81e81b6-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/41.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4114fd99aaa4dc95365dc4bbcb3c9a8a03434a5a-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4131f155339a3476898088b065b8588a2b28278e-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/413e39442f005279560ddad02bbdd1a05c9f0eaf-4 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/41b7eaf8892043eccf381ccbc46ab024eb9c503c-4 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/42.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4208b7fe7ac3a530c159a1c8fd09dd3078b5650f-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/421bd1daa317c5d67fa21879de29d062c342294b-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/42544ff3318fe86dd466e9a05068e752a1057fcc-32 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/42b056f9dac9cc658c80092e490b3dbcd436e3f8-15 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/43.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/432c09281c46537c98864bc7d601780562b68410-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/44.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/446dc91ff0ddc34c3b02f741e3f6f079a4dfcae8-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/45.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/451831159c1afb87077066147630b4b6caeb54c3-11 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/46.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/47.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/48.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/49.bz2 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/49861b3d9bca3e2857d806aaecaac09af4bff1dd-2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/49a3ead0ad96e8da5a4c8f89bd140e1d8af8995a-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4a14a3883f5c8819405319e8fb96234f5746a0ef-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4a625a4b4f3069707e88f16db88e993dabc41aa2-27 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/4a6464c2aba2492f5122856de7ac451994eadda4-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4b0ab2fc1fdfc56066c5c1f2751b292f4ddc557e-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4b55f37e6637f4246a41caa490da4bec632379d4-7 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/4bb422b835278e4aca92d076331d9c8cc5752345-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4bd00d26b893ce064dad6e771f30541b541d43b9-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4cde5adc216a29fff2ec39e23ccc6fca80cd4a15-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4d1b64babe1f045b8374f4d74949622591546eb5-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4d49686993529cfe29473c50b9b0fb2b6ea4f6bf-13 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/4ea726d6736026a733707e695d9c2cdc83efc05b-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/4ef3e6d20ccec24376a526ab9ec9f6f2cc604129-25 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/50.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/50a87eb0c097a7ebf7f1bf3be2c6a7dbe6b6c5c3-23 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/50e3ac1126c605158726db6f2cca3120f99b8e73-22 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/51.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/51075c34f23d161fb97edcf6f1b73ee6005009a0-28 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/512ed5fb4e92818b75bd7633f58d6ca5340ffd94-27 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/514a62216c761adf23d946f11c0d1a0410990641-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/516d84c21ac984bd1cae56910d71b62e39610c5d-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/517d39f406222f0a0121b7a1961953204674c251-33 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/52.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/53.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/54.bz2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5431cabbc58d8dc143ece079de40300c1ce6e101-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/55700385089e16e44968ea410c6b90206b16d72a-14 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/55b9a902445e2bfa2f0f37d630779d329eeda20e-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5620a492eaf067734e5b8b64517b28ec3beaa97e-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5699fea659964d8ab94069d08b0b97834c0a42df-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5765fc21629571e51adf2fc2bc8b64541a1ea08d-18 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5768ea5d1911143f4b1c0585b9b864ebe16aa004-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/57b780437f4abf2d5cba0775bf802a4dfdb067d6-25 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/58f53d40265c9a49c0d3b4292cb637464a4e376a-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/59b254c3565c9eed2bc93385b821da897afcbb15-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5a962e3d6a128983afe9ea78a28cce0f40a790c0-14 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5af52ef91b6f717ffdd805585e24806407e9621b-14 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5b01aeb030dc1dc9568fd32f1647d92f0692a411-6 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/5bbd27cea704a4e6ff3f42f4792a91eb7839bc0d-12 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5bd895c23369df9505dd99ffcd035dc5e897264b-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5bfd84d7b2ba6b6325d5135fb0a9ae1ec5d7d3e1-2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5c4f347c3567baf700dfccf49a91192c83b89da2-8 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/5dd8001f8a87c24f866074c36b6b80f42b298ff0-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5ddf63d61aa38da1d409e37b301e0fe5a207a051-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5e19e298d051aac48b7683dc24577b46268b630c-35 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5e54c67050ee8583c7453ff13d6eec15b2255288-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5f946423d1138924933334c6e5d3eb13e1020e9c-33 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/5fbebd9edd144c4b9869ed4ab40c7cc3c46a4a8f-4 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/6.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6046b14dd1f6925bcfe470a8484353f525db6a9c-19 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/608a9993a51ec7bf252ac76b163def5f7002d2e4-4 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/610d8dc3cf4012e4e2d070988b0720285a4c361e-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/61b196987682fb64ef9c4ff37532bf9b2ac201bc-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/626f8b6efa3ea0f254789fe6cf52f6e52538f357-25 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6277f2e0a6df2ac61660ee1965c690b87c26b556-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/62c738f00c488f493989b2037d9cf1781f0bbd40-11 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/631ffa88df9713a124b3ba6c704c0c75727af2ff-6 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/633df0cd78621cd45067a58d23c6ed67bb1b60cb-31 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/635d5de257a1910a7fd0db2e567edfa348e47270-11 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/64c500b5addcbf8c673188a1477e4159851ae04f-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/660387064a3cf4cb81046989929abe1b4fbfc815-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/66068a7e7bdfd1038a84aeb3dec6e3cb4d17ad57-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/66c34847568ac9cb3ccbb8be26f494988a3e0628-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/67534dbd68040fb9a8867e6af384d33ea323758b-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/67ab3037ff49f082a877224d68e35069cc4d45eb-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/68612136c2017f9caf87122155f82a25f57c2d2a-32 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6981397d97c481e39d563d43916377fb3c74c60e-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/69c2accb74456005e2a9bbef15ccad3d076f2124-28 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/69dcc80940a26844b0afe7898fea9cf68b698214-4 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/69fcd886042d5c3ebe89afd561782ac25619e35b-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6a04b54e1511633ec895326b4e043e186fa5693b-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6a3e8935204dcd3dc48a1ff7415c305f0e5863aa-9 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6b351674a45f2d9be602fe8d3fb84229551b4ce3-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6b72fdd9989971ecc3b50c34ee420f56a03e1026-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/6b7f4ac7aa8b357dee3067d7a60143c03b54bb8d-16 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/6bc138796e9b80572a6cb1b4a7ba30c97c22359d-1 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/6e14a407faae939957b80e641a836735bbdcad5a-2 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/6f24be0bcac848e4e5b4b85bc60f70f12388a5ed-4 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/7.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7102c7f297296821114661e00e5bf54d0891d105-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7151692dfebfc82876676e65ee9b807d83a3df54-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/71a24ce771fb7f1a4163e57a478c3044ad42e62d-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/72c738d7492d3055c6fe7391198422984b9e4702-32 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/72f032947602f1be74f01c91165c5118121f36c7-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/73b6bd1462a0521b4bf76abb1fd80df6e180dc80-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/73c81fef0997a4929b303e02a99f3977870f2013-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/73efed803abadf6167fc3f04e0674cc39c30f6af-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7603f5f266de813608c4cc1ccd1c798ef8065c5c-23 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/764571571e4d46f4397ed534d0160718ce578da4-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/767d1943125a0f6e9397779cc757c9cdd1e05631-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/76d22068e2ed4a5952d4adc7ea8dada5509a784c-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7740102922cb9933980bb800c1115daf38edf654-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/783270b1e353ba3895b7d0c4135b8592e22f6508-12 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/7851a406571c6b4c1aeed0af16db8c48444c3f2b-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/78981d313038119ac4f7017349e50a1cba56b382-7 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/78c88c4afaf5962056b1aea720509b9f6f286b91-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/78e59daada9b9be755d1b508dd392fa9fc6fa9c2-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/78ef686662a059f053f80c1c63c2921deff073fb-31 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/79c5ac978f5aee35e123f523369aa46b1d0a995d-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7a0fc8dacceae32a59589711dce63800085c22c7-23 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7adf4aa021efaa953268c817467959fa3c42ca42-13 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/7b8c99ded96973a6e8f523bc1c6ed4ef5c515aa1-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7b919213d591e6ce4355c635dc1ecc0d8e78befe-30 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7ba80199cbce9a2eb47da15f0c62fd1fb8fa67d9-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7cdc0917ad63ce7a7c98301a366c31635f0f099d-14 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/7ce37ad19bfe9f52eeadda03e6b8448e5bf57800-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7e3132012be223fd55e5e7a7fc2ea602361ed2b4-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7e9a88118e4c41e61f5c501e6edf9a5bd2432be3-23 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/7f081c89cfb6344f4aac5f813da1fd15f8bab022-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7f8c3b163798c8d5e1b65e03f411b56b6c9384bb-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/7f970f16026c689c096a19fef1a3282a13ee69dc-20 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/7fa96d28faf45062eb803ea84a334b607e966f90-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/8.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8261f0c1799ca71c411f6d3f34069b25dac8b739-18 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/82a499521f34b6a9aff3b71d5f8bfd358933a4b2-36 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/82afa534de59025bf1e3358919286525ae7d3347-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/82c627991d65c5c4e88c9ccac39be082cca40765-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8435aa58e67c4de798375b44c11bffa5b680f615-32 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8496965f7aa6cea3e080dbfb911a7034e6623cb7-10 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/84a9bda8369d33ffe0d6f520c24331ae64e9dc88-3 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/86513e3435adaf7c493dd50eb5de372010185e36-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/86637b211f4fa0118ccab9ee193c66286126bb5d-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8695984335fa005895377a8a60000a921d7efd99-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/86baa53eb98a9a342b0d5b79dfa5c58aa9c1b05e-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/87caf7737ebb025ec2d908224818ceb2bc76b658-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/88e6e46ab1ec92ce694b8d4c3d816491169d2bb6-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/89216c662a46d50f37cfa08963acad8c6f7aace7-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8d70b7de160bbef22ab46f798d687a69dbda772c-5 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/8e533f8a1e58710d99d6b7d39af7034961aa4fbe-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8f0d2862c49eebbcd473a38c8fa1e76288f47127-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8f4788d30edd22ebcfef0e52bbf9e8c3d1e8d7e9-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8f61ea021e02cc609baafbdf714b9577e4bcb05f-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/8f7a47710904981ffaa1fefa21fa95fd2d818487-7 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/9.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/90a227d3beab730ed6eecd63657f5406beccabdf-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/92197169aded0d5d0407e3925959e922257a101d-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/924e17974cd194fa756d23394676d37cc3641f64-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/92a785b5ea93d36e27029e281e9a34377d81ce55-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/92d41e4fca52311e848fac274144b6293d9260f7-34 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/92fda3aa2adbe37ff690c59939ca1e1b2a8a7936-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9363b81db6b35e8beebcc32d560f786472829bd8-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/948b1ce043c82d0cfbaa910b6989a1b35a19b8ae-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9505b43fcbc3139441e35bdaaec138e28af076f6-25 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/951bb02c199adb52e9e300e9fc070bf55980b910-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/955404fe3f375361f5c3be1dbcd28eb9a28f06e4-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/955c823909722e2693dd7cea3eadc17833dddf86-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/95ca8da5556065f33b46c2c8186c2f1cebb1b5da-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/967e50c6c1bc99aa5e7fa07c2de14564f52b0fd3-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/96c9a1fa8b0184ad486f8f68a9ddc88434579080-30 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/96cc45abef3bc9fb6659714b9743cda92ec0abb9-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9719ea029fdf8c837f991ac3548145485cc1f06e-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/984480af27d1640fd02f40e736ffcde3a91e4abb-22 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/98d40a50ee58c05727777e242ecbc0d4e214f7fe-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9915e9bb007bc2c1f3d346123933923279f0dec1-27 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/992413e17d64968cb04af34c7761182f20fc97b6-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/995d50f1cb750cbf038246d6cb0cf8db11d7e60e-33 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/99cfa74a1fea5d16168dd9efc720425b85e95eb7-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9a552bab72f174ede3b9bdb7a663c963fd1463d3-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9a5ab6c72a445b3b27129004d2a1a417cd4d8440-26 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/9aa3050cb38a6ad276cb5e5ca0c4776d92cb7b0f-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/9be44693435bc6c51980f30418bcc690d8c25fe7-6 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9c0420bf00f888487d543f42fc48b407c65d4717-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9ca2a086f1f08c7dec54d52425bd72f17c11056e-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9db70b1edad2317d94dcaafe7f5c5e3145084167-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/9e160ae007fc11092a3fd877ebe706c4d841db49-19 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/Mark.Twain-Tom.Sawyer.txt.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a01e13c3e401957031defb62b05434c65b01d5c4-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a059044bdb0402471dbe9aaaa555a063a6bc1e6a-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a06b1a08fcda463f1d51c485b0e7271ff9048b41-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a0f3d67e96968a267366be380147cbc7b17e5b2b-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a18d849dc2a98c4ebb6000b2cc853f21fb64d9e5-24 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/a2e5916be780e35e9ecb7c42be52dd5e134f3363-25 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a33252a74974fc86df30c311d501a1f363d350cd-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a462f03ee666a20244d3331e3635b7eb796d906d-15 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/a56e983782e49f8267a61d4375e98b1a862862ac-9 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/a58a9f9caca5e73b4296b931201a5ea870974c26-15 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/a628194a08ff63e98625b1786175026c5f02c716-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a64f2336fd4a9ec8153b95f40c383e1ecfed9e73-25 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/a6a5682a6663e0c548c9e5acbad4958e2c256b32-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a6dbaac639f3b82609ec27c80fbd003684c28867-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a8c6a4509b61d8baa71f59f9e1eb95712b10626c-23 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a97d9bf241e8ec73f99205b32c24fcd64194f0b9-8 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/a9e348d9896cc740f7e910d0a70c080adb65cc77-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/aa04575587509ffc65a6b0224d24ad1125cb0f63-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/aa290b4dcc8198945311c8149fc1252f14555e70-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/aabb8fa4913c79f0a42494ad2215a32927adbd45-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ac7077c5220abe6cd481318c42dfe6cb2cb2c666-10 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/acbef0322169a93c7421902883cc8057675c953b-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/aec95871bc7d87cae16c36a0d30955b43076aec5-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/b20e3f27f4e8d41f16124881f92546f0fb2edc16-13 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/b27fb21ecbe6e77c91341738621ad7092c29bca5-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b38ce47b707326024fb24860c4365d58ab9f3528-29 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/b3eaea244bd47b64c8de3d81c7b5e94e421d7f32-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b3fd355dc090a732d5cf3b25151f165ea901a682-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b53101ec4348e9c329c13e22790ffde246743030-35 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b58429fd1107617191026029cf327b2ebed963bb-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b58846d79a8dc960a718ef88dd3a06ad49b1fe72-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b5b5b895b4619fa039ea99520b9947de2996c38f-6 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b6aca5c55295d93491e47817f46ca372c9078cec-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b6ddb90092b3087158dc32669529db2012f14c3c-7 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/b6e7a519d013ddb67313af02a9ce966877949487-4 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b71a5a7c576e5cc5ba23845d352b2af16737c03c-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b7815c3b5649d9a367ba99e7e09cf1f251ab6f83-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b7a5b15c9e2d4d659d421de8e3b463200f71f1ec-23 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/b83b3d04ada1403578065d7f10aa7441830dea3c-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b92c70d3f12e67c69ba5db9ad491b7a4e075ece8-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/b94b7ebc6d153e0c99a97864f58b26f7192f66a5-20 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/ba98469ede70309f18893f0ff95380f5a0486fcd-6 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/bc0c31f304c1a1f8be0c8a0d9daa3b8aa1f23799-14 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/bc3ac4aae07cba8d7f657a8739d1774e44bde613-31 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/bc650b6a5356c1935f64f6fb755e43bc5f5187c4-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/bdc123d9da19a7ae0ff87ca0741002fbd8bb2cca-34 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/be06bb3c3b604660fd36b2af8860d35e31c8bbf3-8 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/be5767f4d79c5a0b2643d8eddb74eca0598674dc-19 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c07f4e4cb1d0a34dc6899097fd27ee9f1744cb70-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c1972d0c898848e6188b69bcdbb7d14fcc780ee5-26 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c2ac55a7fb702dd9a527b576d99008fe9b4f376f-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c2c3d29bce8aae89fed326832b3e1e1077cef1da-18 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/c321670bbcd985327045dd1468bf2ac4ae7333e5-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c34998d9a8893eca9cdeafe7b2482469ad98192b-25 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c42ae63ab9584753959f4692cef9fd8513b54691-30 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c5522d11f314fc46de58e15116b6910d52acf866-17 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/c652c46aba3567521f912bae6dc263b668c34c9c-7 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/c6610b87900912d462229a5259dab51ea0aeef33-4 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/c6c37f6c89fe55768f8b3f7b28b99467c239703a-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c71abfffdcf530a6d28fd99cd2c3505c61ef0ac5-8 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c77304b250e887b39b5447d19b9c106fcebe7e66-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c78cd8530e6d8a606a28797552ce3f5494763621-25 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/c790308a65efa1b895bc57abe53e4fbcdb2b7d0e-13 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/c7fe1fe2e3fc19fab3766f9fdb1d22c848d49aed-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/c8b01a7ea9c1b84e4ee5eb68121c64f183e7ea10-9 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ca5d375d8a66727221d3e198d4ad360782944de7-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/cb1314cc880a1a389cedf5c16cc4b8ad505b4506-23 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/cb635ef244cb6affc005c63d0bf8b52aecb1d986-4 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/cd67bf90feaeb1912792508afa01a09fe1f044c6-13 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/cda434677d4bdd969a3bbf84086349f821e39c80-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ceb22e7f581d85ed876e3d61da7df65da8954bf2-32 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/cfe7201e28d42484764264c231663e6372e95ef7-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/cff88dd94ee94e1901d25a74e29ad863bb78b1e4-16 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/cffc7573debb5af80aaddfa752538825275fd6a9-7 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d0ae058f71e53a7afd648b859cd7485886be550d-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d0e6298a63ffc2695cf7d016a124db7375f197cf-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d24f23a23508dd6bc93ea6283ed49c8ba4b737ed-15 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/d295ca4c78f7fd3ff10b0520b09a0a346310e0a9-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d3ddffcd038a5646a53d48b684eac5b721c7062a-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d4275f1f814a5b24f7b4788d15f3fef7b2be8aef-23 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/d57eaf0fada8726afac2287cafb7720af7417b16-1 delete mode 100755 
vendor/github.com/pierrec/lz4/fuzz/corpus/d5c9dc3b5b4e71d902fe4cf5c44b237b104a32a9-4 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/d7855c38db11bfeeb474a4782f1ea293192f786f-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d7912c5e2a776c408e7640f10bd7d655a6a0f31b-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/d8873ec9a0344ea23f70d1ffd78c2fd0435b9885-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/da3418e70658be491531ef6524f6ef7984ff9e96-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/daffc68f738bd5945de9c7babd4e01cc4438fae8-31 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/dba53c14b92561071ccd7762550d53cf43027bdf-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/dc61bdd2fb983111d1392cd79ba9b39e0a3b869f-20 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/dcb49d3d45d32601fa27208cec33813e03ff6179-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/dce9966b94744440d75a845a48c806041f5a6612-3 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/dd799919262810add464dbb4ee39a38f1e4ed258-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/dd92516fbea2d0f96abc78f325d731053a451e16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ddf986569f89016184b5b6e924d5ba827c9980ca-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/de0acf1136a1e05cd27345ce135ea26abd32bbfe-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/de33e3ef8a5780c7d3458188a423c00f470904d0-15 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/de501127da94246b2d3aa947637b49fbc17d5e47-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/de702cd20caeb08a843e0c09b0ce87a74e300415-20 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/de8abda1b9bd5628ca99c8f97237fa885a857bb5-19 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/def6a9e986daf0b268ef29ef7e821a9f6840ef2c-8 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/df0768cf0c709a1ff1a93cc0dad23979501c54ff-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/df5bd5044e9b74c648b5f5fcb4dbdf953175f9f9-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/dfad565009b0667ef2ee10ea9c1286ee5c3ce6b2-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/e.txt.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e1556049ba9794a15ee21aa283876bf63e531a4f-24 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e17af76e8c119233dbd2888ab519bd76d7aa7fe9-6 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e22a5ac115e8bfd3468c9e6ad73ea11b8743798a-30 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/e346c715ac3187598d8c0453d9e741fae1232c99-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e3acf6f2b5a1b97f5a82ebf7d1822077561583fe-26 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/e4a2a1469de980756c607cdc2584fc94bc109382-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e544de8de59a005934dd4b7fd465c5bb0046482e-26 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/e68b04a675d8d4192565a808955764c77ae510e6-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e7ea1bfd65ca7db84f0984474658bfc3b063c63a-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/e7f55f4c85203100c3cd819cdc87abb0e9e86374-32 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/ea212596f8a7aec4eb2e85fd2cdb5c2816b58495-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ea83e3b78398628e8a85e2e618fa956c0ffbd733-35 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/ea9af92f89e6889b523461ae7b2b9fecee5a7280-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/eb967d9cb0407c2328bbdbf98b5602274452d900-23 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ebc69b7ca13ae23b075c9b21ebc283278714e3aa-18 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/ec8e760e79dc08a79af0d79c510cafb74e504472-18 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ec93fb54ce508e132c89b6637913f84c3c78bafd-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ec984b6fb8e41dbcd4299ecd1dd6fd0a77347122-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ecbd6bdea50b52d263b4e9cdb96c7ce078d2b780-25 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ecdd1df7d975c8cf8d015b2f1d0d7c6e00eb578b-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/eda1ee9cf85f3f71ec8a4eec7534ed2677b47775-15 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/edbc11de7dd074c367a69532db023cd810bb3978-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ee6afbf375619a2bd6fb0abe0e42e51ab3b0ab13-6 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/ee907d38c1394c4971b389a99a3be0913836212b-9 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/eebbefa1983c9e1aeb5217aabcac7ab24dfe166f-17 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/eee3d4a9a8b297f016c23f50a9792c30a621720e-21 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ef87432939473264357babc06257b0280ffd15ee-5 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/efd3db86b12d209db7f0b24281a2cccebff526cd-33 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/efdd522fe3abb88204f63b1fe7312f62b6ee593d-16 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f31dcf6e3044e050f2396b601ebe420e89749c07-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f35bdf2e8b4af93c6a73e564055aa4eacd9f0d0c-13 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/f3a2381d6f39defe22520aea46201e6ce6d37f80-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/f3e916907eab3412b5875e5eca05bf3eac8a8d5e-1 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f3f49f3016c41052be090544cf110c322bc7ef63-24 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/f4003ca01b90a4ee1be5701a5dd7d5f04e00c8f8-28 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/f493376c3eda80cbe822ac456486734b72f891fc-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f55efbb04cd32f7828e951d067319db00627153f-28 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f5ecb47dfd92bb0564588beefd03ffcb0bbdae54-29 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f71b4776ecbbe47746fb53d7749751c5c5bbff05-22 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f724d4c839c012c7772618e28ef68d478cc00c74-21 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/f86152e5ce510dc674fa73d20b324e2d3c4d145b-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/f931bee2e7f1fefd8bb2fabf88f8f3d2b3ea78fa-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/f9bcd3660c355799a865fedd15cb27a18591f244-33 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/fac6c4165067ef2d87a23a2530a59eb560d470e0-23 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/fb56a1001599e07354ce3101af111554c6c9bb40-1 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/fb75f3059f8835a7e8781c899af756f22d1c06b4-7 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/fbfe35b0485040874ed564b94ba764bdd17e80fc-10 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/fcb1c8b1893ca85647581cadec481754d8f35c96-12 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/fcb33fb48e48acd9155fd7ed8e82e71c850ffd22-16 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/fcd47a15e10a21e1eb13aeac223becc89aac4c69-2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/fd4f0dc77a022a8140ffe5b2e1a5ff577e844878-27 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/fdb78af507e72288b059ff902ae5e76538d1e6ea-14 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/fe002e4c7731ecb4c09c09a4e1fa29c0c61874bc-7 delete mode 100644 
vendor/github.com/pierrec/lz4/fuzz/corpus/fe78d4faf4ce717d84938010f92ca5e844f9980b-13 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ff3b7ea844eb197dc6bd59d9f8e4a4a5718a6771-18 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/ff47856b8fa7323572c8b4a6d8028dcb2663a37a-11 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/corpus/ffa97253e1ab365b84eebb9d257f9370b7796fbf-28 delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/corpus/pss-vect.txt.bz2 delete mode 100644 vendor/github.com/pierrec/lz4/fuzz/lz4-fuzz.zip delete mode 100755 vendor/github.com/pierrec/lz4/fuzz/lz4.go delete mode 100644 vendor/github.com/pierrec/lz4/go.mod delete mode 100644 vendor/github.com/pierrec/lz4/go.sum delete mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero_test.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4c/main.go create mode 100644 vendor/github.com/pierrec/lz4/reader_legacy.go delete mode 100644 vendor/github.com/pierrec/lz4/reader_test.go delete mode 100644 vendor/github.com/pierrec/lz4/testdata/Mark.Twain-Tom.Sawyer.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/Mark.Twain-Tom.Sawyer.txt.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/testdata/Mark.Twain-Tom.Sawyer_long.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/Mark.Twain-Tom.Sawyer_long.txt.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/testdata/README.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/e.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/e.txt.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/testdata/gettysburg.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/gettysburg.txt.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/testdata/pg1661.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/pg1661.txt.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/testdata/pi.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/pi.txt.lz4 delete mode 100644 
vendor/github.com/pierrec/lz4/testdata/random.data delete mode 100644 vendor/github.com/pierrec/lz4/testdata/random.data.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/testdata/repeat.txt delete mode 100644 vendor/github.com/pierrec/lz4/testdata/repeat.txt.lz4 delete mode 100644 vendor/github.com/pierrec/lz4/writer_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/.gitignore delete mode 100644 vendor/github.com/prometheus/client_golang/.travis.yml delete mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md delete mode 100644 vendor/github.com/prometheus/client_golang/CHANGELOG.md delete mode 100644 vendor/github.com/prometheus/client_golang/CONTRIBUTING.md delete mode 100644 vendor/github.com/prometheus/client_golang/README.md delete mode 100644 vendor/github.com/prometheus/client_golang/VERSION delete mode 100644 vendor/github.com/prometheus/client_golang/api/prometheus/api.go delete mode 100644 vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/examples/random/main.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go rename vendor/github.com/prometheus/client_golang/prometheus/{metric_test.go => build_info.go} (52%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/examples_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go delete mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/push.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go delete mode 100644 vendor/github.com/prometheus/client_model/.gitignore delete mode 100644 
vendor/github.com/prometheus/client_model/CONTRIBUTING.md delete mode 100644 vendor/github.com/prometheus/client_model/MAINTAINERS.md delete mode 100644 vendor/github.com/prometheus/client_model/Makefile delete mode 100644 vendor/github.com/prometheus/client_model/README.md delete mode 100644 vendor/github.com/prometheus/client_model/cpp/metrics.pb.cc delete mode 100644 vendor/github.com/prometheus/client_model/cpp/metrics.pb.h delete mode 100644 vendor/github.com/prometheus/client_model/metrics.proto delete mode 100644 vendor/github.com/prometheus/client_model/pom.xml delete mode 100644 vendor/github.com/prometheus/client_model/python/prometheus/__init__.py delete mode 100644 vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py delete mode 100644 vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py delete mode 100644 vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py delete mode 100644 vendor/github.com/prometheus/client_model/ruby/.gitignore delete mode 100644 vendor/github.com/prometheus/client_model/ruby/Gemfile delete mode 100644 vendor/github.com/prometheus/client_model/ruby/LICENSE delete mode 100644 vendor/github.com/prometheus/client_model/ruby/Makefile delete mode 100644 vendor/github.com/prometheus/client_model/ruby/README.md delete mode 100644 vendor/github.com/prometheus/client_model/ruby/Rakefile delete mode 100644 vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb delete mode 100644 vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb delete mode 100644 vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb delete mode 100644 vendor/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec delete mode 100644 vendor/github.com/prometheus/client_model/setup.py delete mode 100644 
vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java delete mode 100644 vendor/github.com/prometheus/common/.travis.yml delete mode 100644 vendor/github.com/prometheus/common/CONTRIBUTING.md delete mode 100644 vendor/github.com/prometheus/common/MAINTAINERS.md delete mode 100644 vendor/github.com/prometheus/common/README.md delete mode 100644 vendor/github.com/prometheus/common/config/config.go delete mode 100644 vendor/github.com/prometheus/common/config/http_config.go delete mode 100644 vendor/github.com/prometheus/common/config/http_config_test.go delete mode 100644 vendor/github.com/prometheus/common/config/testdata/barney-no-pass.key delete mode 100644 vendor/github.com/prometheus/common/config/testdata/barney.crt delete mode 100644 vendor/github.com/prometheus/common/config/testdata/basic-auth-password delete mode 100644 vendor/github.com/prometheus/common/config/testdata/bearer.token delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.basic-auth.good.yaml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.basic-auth.no-password.yaml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.basic-auth.no-username.yaml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.basic-auth.too-much.bad.yaml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.bearer-token-and-file-set.bad.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.empty.bad.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.good.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/http.conf.invalid-bearer-token-file.bad.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/server.crt delete mode 100644 vendor/github.com/prometheus/common/config/testdata/server.key delete mode 100644 
vendor/github.com/prometheus/common/config/testdata/tls-ca-chain.pem delete mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml delete mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml delete mode 100644 vendor/github.com/prometheus/common/config/tls_config_test.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/bench_test.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/decode_test.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 delete mode 100644 
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/json2 delete mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/json2_bad delete mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/protobuf delete mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz delete mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/text delete mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/text.gz delete mode 100644 vendor/github.com/prometheus/common/expfmt/text_create_test.go delete mode 
100644 vendor/github.com/prometheus/common/expfmt/text_parse_test.go delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go delete mode 100644 vendor/github.com/prometheus/common/log/log_test.go delete mode 100644 vendor/github.com/prometheus/common/log/syslog_formatter_test.go delete mode 100644 vendor/github.com/prometheus/common/model/alert_test.go delete mode 100644 vendor/github.com/prometheus/common/model/labels_test.go delete mode 100644 vendor/github.com/prometheus/common/model/metric_test.go delete mode 100644 vendor/github.com/prometheus/common/model/signature_test.go delete mode 100644 vendor/github.com/prometheus/common/model/silence_test.go delete mode 100644 vendor/github.com/prometheus/common/model/time_test.go delete mode 100644 vendor/github.com/prometheus/common/model/value_test.go delete mode 100644 vendor/github.com/prometheus/common/promlog/flag/flag.go delete mode 100644 vendor/github.com/prometheus/common/promlog/log.go delete mode 100644 vendor/github.com/prometheus/common/route/route.go delete mode 100644 vendor/github.com/prometheus/common/route/route_test.go delete mode 100644 vendor/github.com/prometheus/procfs/.circleci/config.yml create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common create mode 100644 vendor/github.com/prometheus/procfs/SECURITY.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go delete mode 100644 vendor/github.com/prometheus/procfs/bcache/bcache.go delete mode 100644 vendor/github.com/prometheus/procfs/bcache/get.go delete mode 100644 vendor/github.com/prometheus/procfs/bcache/get_test.go delete mode 100644 vendor/github.com/prometheus/procfs/buddyinfo_test.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go rename vendor/github.com/prometheus/procfs/{sysfs/doc.go => 
cpuinfo_armx.go} (77%) create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go delete mode 100644 vendor/github.com/prometheus/procfs/fs_test.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/go.mod create mode 100644 vendor/github.com/prometheus/procfs/go.sum create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go rename vendor/github.com/prometheus/procfs/internal/util/{sysreadfile_linux.go => sysreadfile.go} (88%) rename vendor/github.com/prometheus/{client_golang/examples/simple/main.go => procfs/internal/util/sysreadfile_compat.go} (58%) create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go delete mode 100644 vendor/github.com/prometheus/procfs/ipvs_test.go create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go delete mode 100644 vendor/github.com/prometheus/procfs/mdstat_test.go create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/mountstats_test.go create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go delete mode 100644 vendor/github.com/prometheus/procfs/net_dev_test.go create mode 100644 vendor/github.com/prometheus/procfs/net_ip_socket.go create mode 100644 
vendor/github.com/prometheus/procfs/net_protocols.go create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_tcp.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_io_test.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_limits_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_ns_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_stat_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_test.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go delete mode 100755 vendor/github.com/prometheus/procfs/scripts/check_license.sh create mode 100644 vendor/github.com/prometheus/procfs/slab.go delete mode 100644 vendor/github.com/prometheus/procfs/stat_test.go create mode 100644 
vendor/github.com/prometheus/procfs/swaps.go delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/.gitignore delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/fixtures.ttar delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/fs.go delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/fs_test.go delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/net_class.go delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/net_class_test.go delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/system_cpu.go delete mode 100644 vendor/github.com/prometheus/procfs/sysfs/system_cpu_test.go mode change 100755 => 100644 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/vm.go delete mode 100644 vendor/github.com/prometheus/procfs/xfrm_test.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/parse_test.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/cmd/metrics-bench/metrics-bench.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/cmd/metrics-example/metrics-example.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/cmd/never-read/never-read.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/counter_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/debug_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/ewma_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/exp/exp.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/graphite_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/histogram_test.go delete mode 100644 
vendor/github.com/rcrowley/go-metrics/json_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/librato/client.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/librato/librato.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/meter_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/metrics_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/registry_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/sample_test.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/stathat/stathat.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/timer_test.go mode change 100755 => 100644 vendor/github.com/rcrowley/go-metrics/validate.sh delete mode 100644 vendor/github.com/rcrowley/go-metrics/writer_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/.gitignore delete mode 100644 vendor/github.com/samuel/go-zookeeper/.travis.yml delete mode 100644 vendor/github.com/samuel/go-zookeeper/README.md delete mode 100644 vendor/github.com/samuel/go-zookeeper/examples/basic.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/cluster_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/conn_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/constants_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/flw_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/lock_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/server_help.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/server_java.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/structs_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/throttle_test.go delete mode 100644 
vendor/github.com/samuel/go-zookeeper/zk/util_test.go delete mode 100644 vendor/github.com/samuel/go-zookeeper/zk/zk_test.go create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml delete mode 100644 vendor/github.com/sirupsen/logrus/alt_exit_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/entry_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/example_basic_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/example_global_hook_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/example_hook_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/formatter_bench_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/hook_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/internal/testutils/testutils.go delete mode 100644 vendor/github.com/sirupsen/logrus/json_formatter_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/logger_bench_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/logger_test.go delete mode 100644 vendor/github.com/sirupsen/logrus/logrus_test.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_notwindows.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_windows.go delete mode 100644 vendor/github.com/sirupsen/logrus/text_formatter_test.go rename vendor/github.com/{pierrec/lz4/fuzz/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709 => xdg/scram/.gitignore} (100%) create mode 100644 
vendor/github.com/xdg/scram/.travis.yml create mode 100644 vendor/github.com/xdg/scram/LICENSE create mode 100644 vendor/github.com/xdg/scram/README.md create mode 100644 vendor/github.com/xdg/scram/client.go create mode 100644 vendor/github.com/xdg/scram/client_conv.go create mode 100644 vendor/github.com/xdg/scram/common.go create mode 100644 vendor/github.com/xdg/scram/doc.go create mode 100644 vendor/github.com/xdg/scram/parse.go create mode 100644 vendor/github.com/xdg/scram/scram.go create mode 100644 vendor/github.com/xdg/scram/server.go create mode 100644 vendor/github.com/xdg/scram/server_conv.go rename vendor/github.com/{pierrec/lz4/fuzz/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 => xdg/stringprep/.gitignore} (100%) mode change 100755 => 100644 create mode 100644 vendor/github.com/xdg/stringprep/.travis.yml create mode 100644 vendor/github.com/xdg/stringprep/LICENSE create mode 100644 vendor/github.com/xdg/stringprep/README.md create mode 100644 vendor/github.com/xdg/stringprep/bidi.go create mode 100644 vendor/github.com/xdg/stringprep/doc.go create mode 100644 vendor/github.com/xdg/stringprep/error.go create mode 100644 vendor/github.com/xdg/stringprep/map.go create mode 100644 vendor/github.com/xdg/stringprep/profile.go create mode 100644 vendor/github.com/xdg/stringprep/saslprep.go create mode 100644 vendor/github.com/xdg/stringprep/set.go create mode 100644 vendor/github.com/xdg/stringprep/tables.go delete mode 100644 vendor/golang.org/x/crypto/.gitattributes delete mode 100644 vendor/golang.org/x/crypto/.gitignore delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/crypto/README.md delete mode 100644 vendor/golang.org/x/crypto/acme/acme.go delete mode 100644 vendor/golang.org/x/crypto/acme/acme_test.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert_test.go delete mode 100644 
vendor/golang.org/x/crypto/acme/autocert/cache.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache_test.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/example_test.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/internal/acmetest/ca.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal_test.go delete mode 100644 vendor/golang.org/x/crypto/acme/http.go delete mode 100644 vendor/golang.org/x/crypto/acme/http_test.go delete mode 100644 vendor/golang.org/x/crypto/acme/jws.go delete mode 100644 vendor/golang.org/x/crypto/acme/jws_test.go delete mode 100644 vendor/golang.org/x/crypto/acme/types.go delete mode 100644 vendor/golang.org/x/crypto/acme/types_test.go delete mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go delete mode 100644 vendor/golang.org/x/crypto/argon2/argon2_test.go delete mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 
vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_test.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/register.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_386.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_386.s delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_generic.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_ref.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_test.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2x.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/register.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/blowfish_test.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go delete mode 100644 vendor/golang.org/x/crypto/bn256/bn256.go delete mode 100644 vendor/golang.org/x/crypto/bn256/bn256_test.go delete mode 100644 vendor/golang.org/x/crypto/bn256/constants.go delete mode 100644 vendor/golang.org/x/crypto/bn256/curve.go delete mode 100644 vendor/golang.org/x/crypto/bn256/example_test.go delete mode 100644 vendor/golang.org/x/crypto/bn256/gfp12.go delete mode 100644 vendor/golang.org/x/crypto/bn256/gfp2.go delete mode 100644 vendor/golang.org/x/crypto/bn256/gfp6.go delete mode 100644 vendor/golang.org/x/crypto/bn256/optate.go delete mode 100644 vendor/golang.org/x/crypto/bn256/twist.go delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5_test.go delete mode 100644 
vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_test.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go delete mode 100644 vendor/golang.org/x/crypto/codereview.cfg delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1_test.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/example_test.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_test.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s delete mode 100644 
vendor/golang.org/x/crypto/curve25519/square_amd64.s delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_test.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/testdata/sign.input.gz delete mode 100644 vendor/golang.org/x/crypto/hkdf/example_test.go delete mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go delete mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf_test.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_test.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/vectors_test.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/xor.go delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_test.go delete mode 100644 vendor/golang.org/x/crypto/md4/example_test.go delete mode 100644 vendor/golang.org/x/crypto/md4/md4_test.go delete mode 100644 vendor/golang.org/x/crypto/nacl/auth/auth.go delete mode 100644 vendor/golang.org/x/crypto/nacl/auth/auth_test.go delete mode 100644 vendor/golang.org/x/crypto/nacl/auth/example_test.go delete mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go delete mode 100644 vendor/golang.org/x/crypto/nacl/box/box_test.go delete mode 100644 vendor/golang.org/x/crypto/nacl/box/example_test.go delete mode 100644 
vendor/golang.org/x/crypto/nacl/secretbox/example_test.go delete mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go delete mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go delete mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign.go delete mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign_test.go delete mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go delete mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys_data_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go delete mode 100644 
vendor/golang.org/x/crypto/openpgp/packet/ocfb_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid_test.go delete mode 100644 
vendor/golang.org/x/crypto/openpgp/read.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/read_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k_test.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/write_test.go delete mode 100644 vendor/golang.org/x/crypto/otr/libotr_test_helper.c delete mode 100644 vendor/golang.org/x/crypto/otr/otr.go delete mode 100644 vendor/golang.org/x/crypto/otr/otr_test.go delete mode 100644 vendor/golang.org/x/crypto/otr/smp.go delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/mac_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go delete mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305_test.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s delete mode 100644 
vendor/golang.org/x/crypto/poly1305/sum_arm.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_noasm.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ref.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s delete mode 100644 vendor/golang.org/x/crypto/poly1305/vectors_test.go delete mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160.go delete mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go delete mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160block.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa20.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa20_test.go delete mode 100644 vendor/golang.org/x/crypto/scrypt/example_test.go delete mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go delete mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt_test.go delete mode 100644 vendor/golang.org/x/crypto/sha3/doc.go delete mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go delete mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go delete mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go delete mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go delete mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s delete mode 100644 vendor/golang.org/x/crypto/sha3/register.go delete mode 100644 
vendor/golang.org/x/crypto/sha3/sha3.go delete mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.go delete mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s delete mode 100644 vendor/golang.org/x/crypto/sha3/sha3_test.go delete mode 100644 vendor/golang.org/x/crypto/sha3/shake.go delete mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go delete mode 100644 vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate delete mode 100644 vendor/golang.org/x/crypto/sha3/xor.go delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/client_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/example_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/server_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/testdata_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/benchmark_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 vendor/golang.org/x/crypto/ssh/certs_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth_test.go delete mode 100644 
vendor/golang.org/x/crypto/ssh/client_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 vendor/golang.org/x/crypto/ssh/example_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 vendor/golang.org/x/crypto/ssh/kex_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 vendor/golang.org/x/crypto/ssh/keys_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go delete mode 100644 vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mempipe_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 vendor/golang.org/x/crypto/ssh/messages_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mux_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 vendor/golang.org/x/crypto/ssh/session_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go 
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go delete mode 100644 vendor/golang.org/x/crypto/ssh/testdata/doc.go delete mode 100644 vendor/golang.org/x/crypto/ssh/testdata/keys.go delete mode 100644 vendor/golang.org/x/crypto/ssh/testdata_test.go delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go delete mode 100644 vendor/golang.org/x/crypto/ssh/transport_test.go delete mode 100644 vendor/golang.org/x/crypto/tea/cipher.go delete mode 100644 vendor/golang.org/x/crypto/tea/tea_test.go delete mode 100644 vendor/golang.org/x/crypto/twofish/twofish.go delete mode 100644 vendor/golang.org/x/crypto/twofish/twofish_test.go delete mode 100644 vendor/golang.org/x/crypto/xtea/block.go delete mode 100644 vendor/golang.org/x/crypto/xtea/cipher.go delete mode 100644 vendor/golang.org/x/crypto/xtea/xtea_test.go delete mode 100644 vendor/golang.org/x/crypto/xts/xts.go delete mode 100644 vendor/golang.org/x/crypto/xts/xts_test.go create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/proxy/dial.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go delete mode 100644 vendor/golang.org/x/sys/.gitattributes 
delete mode 100644 vendor/golang.org/x/sys/.gitignore delete mode 100644 vendor/golang.org/x/sys/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/sys/README.md delete mode 100644 vendor/golang.org/x/sys/codereview.cfg delete mode 100644 vendor/golang.org/x/sys/cpu/cpu.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.c delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_test.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go delete mode 100644 vendor/golang.org/x/sys/plan9/asm.s delete mode 100644 vendor/golang.org/x/sys/plan9/const_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go delete mode 100755 vendor/golang.org/x/sys/plan9/mkall.sh delete mode 100755 vendor/golang.org/x/sys/plan9/mkerrors.sh delete mode 100755 vendor/golang.org/x/sys/plan9/mksyscall.pl delete mode 100755 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/race.go delete mode 100644 vendor/golang.org/x/sys/plan9/race0.go delete mode 100644 vendor/golang.org/x/sys/plan9/str.go delete mode 100644 vendor/golang.org/x/sys/plan9/syscall.go delete mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/syscall_test.go delete mode 100644 
vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go rename vendor/golang.org/x/sys/{plan9/asm_plan9_amd64.s => unix/asm_freebsd_arm64.s} (61%) create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s rename vendor/golang.org/x/sys/{plan9/asm_plan9_arm.s => unix/asm_openbsd_arm64.s} (52%) rename vendor/golang.org/x/sys/{plan9/asm_plan9_386.s => unix/asm_openbsd_mips64.s} (51%) create mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/unix/creds_test.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux_test.go create mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/example_test.go delete mode 100644 vendor/golang.org/x/sys/unix/export_test.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/fdset.go create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/linux/Dockerfile delete mode 100644 vendor/golang.org/x/sys/unix/linux/mkall.go delete mode 100755 vendor/golang.org/x/sys/unix/linux/mksysnum.pl delete mode 100644 vendor/golang.org/x/sys/unix/linux/types.go mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mkall.sh mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall.pl delete mode 100755 
vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_darwin.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl delete mode 100644 vendor/golang.org/x/sys/unix/mmap_unix_test.go delete mode 100644 vendor/golang.org/x/sys/unix/openbsd_test.go rename vendor/golang.org/x/sys/unix/{openbsd_pledge.go => pledge_openbsd.go} (98%) create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_test.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go delete mode 100644 
vendor/golang.org/x/sys/unix/syscall_linux_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_test.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_test.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_test.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_test.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/timestruct_test.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go rename vendor/golang.org/x/sys/unix/{openbsd_unveil.go => unveil_openbsd.go} (98%) delete mode 100644 vendor/golang.org/x/sys/unix/xattr_test.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go rename vendor/golang.org/x/sys/unix/{zptracearm_linux.go => zptrace_armnn_linux.go} (90%) create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go rename vendor/golang.org/x/sys/unix/{zptracemips_linux.go => 
zptrace_mipsnn_linux.go} (91%) rename vendor/golang.org/x/sys/unix/{zptracemipsle_linux.go => zptrace_mipsnnle_linux.go} (91%) rename vendor/golang.org/x/sys/unix/{zptrace386_linux.go => zptrace_x86_linux.go} (93%) create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go create mode 
100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_arm.s create mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash delete mode 100644 vendor/golang.org/x/sys/windows/registry/export_test.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/registry_test.go create mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/debug/log.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/debug/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/event.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/eventlog/log_test.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/example/beep.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/example/install.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/example/main.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/example/manage.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/example/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/go12.c delete mode 100644 vendor/golang.org/x/sys/windows/svc/go12.go delete mode 100644 
vendor/golang.org/x/sys/windows/svc/go13.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/config.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/mgr.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/recovery.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/security.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/service.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/svc_test.go delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_386.s delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_amd64.s delete mode 100644 vendor/golang.org/x/sys/windows/svc/sys_arm.s delete mode 100644 vendor/golang.org/x/sys/windows/syscall_test.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows_test.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 vendor/golang.org/x/text/AUTHORS create mode 100644 vendor/golang.org/x/text/CONTRIBUTORS create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create 
mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/google.golang.org/protobuf/AUTHORS create mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 
vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go rename vendor/{golang.org/x/sys/cpu/cpu_ppc64x.go => google.golang.org/protobuf/internal/flags/proto_legacy_disable.go} (72%) rename vendor/{golang.org/x/sys/cpu/cpu_mipsx.go => google.golang.org/protobuf/internal/flags/proto_legacy_enable.go} (72%) create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 
vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 
vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 
vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go rename vendor/{golang.org/x/sys/cpu/cpu_mips64x.go => google.golang.org/protobuf/runtime/protoiface/legacy.go} (50%) create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/_examples/chat1/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/_examples/chat2/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/_examples/completion/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/_examples/curl/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/_examples/modular/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/_examples/ping/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/app_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/args_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/cmd/genvalues/main.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/cmd_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/completions_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/examples_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/flags_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/parser_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/parsers_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/usage_test.go delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/values_test.go create mode 
100644 vendor/modules.txt diff --git a/.DS_Store b/.DS_Store index da09aade453cde57886b29e20b9d67d1ffa73ec1..2164c1fb9d1ff601eb8c1d845f9690f1d5b1cb90 100644 GIT binary patch delta 45 zcmZp1XmOa}&nUJrU^hRb*k&F91E$SsLRO5M?+9zNOl(No%r5brWpj$?Vy1}=$pB2j B4> building release tarball" @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) -docker: +docker: build @echo ">> building docker image" @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . diff --git a/go.mod b/go.mod index 3fbe4f53..352aec0b 100644 --- a/go.mod +++ b/go.mod @@ -1,31 +1,25 @@ module github.com/danielqsj/kafka_exporter -go 1.12 +go 1.16 require ( - github.com/Shopify/sarama v1.22.1 - github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc - github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 - github.com/davecgh/go-spew v1.1.1 - github.com/eapache/go-resiliency v1.1.0 - github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 - github.com/eapache/queue v1.1.0 - github.com/golang/protobuf v1.2.0 - github.com/golang/snappy v0.0.1 - github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/Shopify/sarama v1.28.0 + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e - github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 - github.com/prometheus/client_golang v0.8.0 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common v0.0.0-20181116084131-1f2c4f3cd6db - github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a - github.com/samuel/go-zookeeper 
v0.0.0-20180130194729-c4fab1ac1bec - github.com/sirupsen/logrus v1.2.0 + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/pkg/profile v1.2.1 // indirect + github.com/prometheus/client_golang v1.9.0 + github.com/prometheus/common v0.19.0 + github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/promu v0.8.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c - golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 - golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e + golang.org/x/net v0.0.0-20210315170653-34ac3e1c2000 // indirect + golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 // indirect + golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005 // indirect + google.golang.org/appengine v1.6.7 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 ) diff --git a/go.sum b/go.sum index 88da9464..769a7a16 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,45 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod 
h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.22.0 h1:rtiODsvY4jW6nUV6n3K+0gx/8WlAwVt+Ixt6RIvpYyo= @@ -10,76 +48,729 @@ github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa h1:UMPk5t5qV+uZE github.com/Shopify/sarama v1.22.1-0.20190412022128-c14cccae56fa/go.mod h1:FRzlvRpMFO/639zY1SDxUxkqH97Y0ndM5CbGj6oG3As= github.com/Shopify/sarama v1.22.1 h1:exyEsKLGyCsDiqpV5Lr4slFi8ev2KiM3cP1KZ6vnCQ0= github.com/Shopify/sarama v1.22.1/go.mod h1:FRzlvRpMFO/639zY1SDxUxkqH97Y0ndM5CbGj6oG3As= +github.com/Shopify/sarama v1.28.0 h1:lOi3SfE6OcFlW9Trgtked2aHNZ2BIG/d6Do+PEUAqqM= +github.com/Shopify/sarama v1.28.0/go.mod h1:j/2xTrU39dlzBmsxF1eQ2/DdWrxyBCl6pzz7a81o/ZY= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod 
h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram 
v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go 
v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github/v25 v25.1.3 h1:Ht4YIQgUh4l4lc80fvGnw60khXysXvlgPxPP8uJG3EA= +github.com/google/go-github/v25 v25.1.3/go.mod h1:6z5pC69qHtrPJ0sXPsj4BLnd82b+r6sLB7qcBoRZqpw= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= 
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod 
h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/krallistic/kazoo-go 
v0.0.0-20170526135507-a15279744f4e h1:IWiVY66Xy9YrDZ28qJMt1UTlh6x9UGW0aDH/o58CSnA= github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e/go.mod h1:Rq6003vCNoJNrT6ol0hMebQ3GWLWXSHrD/QcMlXt0EE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer 
v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742 h1:wKfigKMTgvSzBLIVvB5QaBBQI0odU6n45/UKSphjLus= github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 
v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181116084131-1f2c4f3cd6db h1:ckMAAQJ96ZKwKyiGamJdsinLn3D9+daeRlvvmYo9tkI= github.com/prometheus/common v0.0.0-20181116084131-1f2c4f3cd6db/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.19.0 h1:Itb4+NjG9wRdkAWgVucbM/adyIXxEhbw0866e0uZE6A= +github.com/prometheus/common v0.19.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/promu v0.8.1 h1:ZxQeSWE48NjE2saVJ1Vj/NLcKE4quT2l03TWHJZ8/LA= +github.com/prometheus/promu v0.8.1/go.mod h1:2XjlqlTZ9elA2Xq6LNViqcYjcM6X8lk9ly9tf0GAR3c= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto 
v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210315170653-34ac3e1c2000 h1:6mqyFav9MzRNys8OnKlbKYSJxsoVvhb773Si3bu5fYE= +golang.org/x/net v0.0.0-20210315170653-34ac3e1c2000/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 h1:duBc5zuJsmJXYOVVE/6PxejI+N3AaCqKjtsoLn1Je5Q= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8 h1:YoY1wS6JYVRpIfFngRf2HHo9R9dAne3xbkGOQ5rJXjU= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005 h1:pDMpM2zh2MT0kHy037cKlSby2nEhD50SYqwQk76Nm40= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 
h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/original.log b/original.log deleted file mode 100644 index 2bb5568a..00000000 --- a/original.log +++ /dev/null @@ -1,744 +0,0 @@ -commit 800e6b0262ac7c4c6ab42e32a0b287af1ed22a89 -Merge: 98133e4 fb93caa -Author: Matheus da luz -Date: Thu Jul 12 11:13:46 2018 -0300 - - merge branch 'master' of 
https://github.com/matheusdaluz/kafka_exporter - -commit 98133e47022fa07cef931a1570998191488daf23 -Author: Matheus.Costa -Date: Wed Jul 11 16:18:28 2018 -0300 - - parent 54278f0587bf8e2fa7c0b879d1da244535572e3a - author Matheus.Costa 1531336708 -0300 - committer Matheus da luz 1531404395 -0300 - - get lag from zookeeper - - Fix - - new package - - krallistic - - merge - - if removed - - dep and squash - - new version - - Test - - package krallistic with dep - -commit fb93caa702d271e920c770cd0f389d6aab1be19c -Author: matheusdaluz -Date: Thu Jul 12 09:45:51 2018 -0300 - - package krallistic with dep - -commit b0f592ffccc43f5dfb9ad54b02d504c0ba2601ff -Author: matheusdaluz -Date: Thu Jul 12 09:44:33 2018 -0300 - - Test - -commit 2395431b48722fb1bf0b10158c5c03747a44ed74 -Merge: b6d2058 2ebf1ad -Author: matheusdaluz -Date: Thu Jul 12 09:41:25 2018 -0300 - - Merge branch 'master' of https://github.com/matheusdaluz/kafka_exporter - -commit b6d2058c3bea3203f6b0c0238c879e1744c4ff55 -Author: matheusdaluz -Date: Thu Jul 12 09:27:06 2018 -0300 - - dep and squash - -commit 2ebf1ad53af8cc75eed35e71b6a804ba953167af -Author: matheusdaluz -Date: Thu Jul 12 08:55:20 2018 -0300 - - new version - -commit f3b0ba7f550826c8920f414d0230e9e1e4c71507 -Author: Matheus da luz -Date: Wed Jul 11 17:45:44 2018 -0300 - - if removed - -commit 7cbfa20f27f7289d4c408f3a49b6e16889637f39 -Author: matheusdaluz -Date: Wed Jul 11 17:06:51 2018 -0300 - - merge - -commit 1dccd2e31cb00cb236673077241e337068467daa -Author: matheusdaluz -Date: Wed Jul 11 17:01:41 2018 -0300 - - krallistic - -commit dc3c9fab01f9a98872c3ff59ce120867ca72cc5f -Merge: 440fe70 abb1be5 -Author: matheusdaluz -Date: Wed Jul 11 17:00:16 2018 -0300 - - krallistic - -commit 440fe7047c597a81354bfb68062398eb73dade43 -Author: matheusdaluz -Date: Wed Jul 11 16:59:36 2018 -0300 - - new package - -commit abb1be5bed8dd3b8256076edc3877475fc601f55 -Author: Matheus da luz -Date: Wed Jul 11 16:22:54 2018 -0300 - - Update kafka_exporter.go - -commit 
a0071e741e8ad1bfc89da01ddf552ed4212a09bc -Author: Matheus.Costa -Date: Wed Jul 11 16:21:37 2018 -0300 - - Fix - -commit 522202e76c9fbc1e6ce01651ec141237fc7587bb -Author: Matheus.Costa -Date: Wed Jul 11 16:18:28 2018 -0300 - - get lag from zookeeper - -commit 830660212e6c109e69dcb1cb58f5159fe3b38903 -Merge: 7aa9c77 1c42bfb -Author: Daniel (Shijun) Qian -Date: Sat Jul 7 22:31:15 2018 +0800 - - Merge pull request #47 from danielqsj/rel-1.2 - - Release 1.2.0 - -commit 1c42bfb297ce7e44f5139b1e3fd0de541dda40ad -Author: danielqsj -Date: Sat Jul 7 22:28:32 2018 +0800 - - Release 1.2.0 - -commit 7aa9c7729fee557ef7902577fe9215ff2ccb24c2 -Merge: 54278f0 ae33f02 -Author: Daniel (Shijun) Qian -Date: Sat Jul 7 21:57:03 2018 +0800 - - Merge pull request #46 from danielqsj/sarama - - Update sarama to v1.17.0 - -commit ae33f0266dbe7480db480c8d6f614bb3256fc4b8 -Author: danielqsj -Date: Sat Jul 7 21:16:19 2018 +0800 - - Update sarama to v1.17.0 - -commit 54278f0587bf8e2fa7c0b879d1da244535572e3a -Merge: 112ce08 0640680 -Author: Daniel (Shijun) Qian -Date: Sat Jul 7 14:40:12 2018 +0800 - - Merge pull request #43 from gburanov/gburanov_error_fix - - Check for sarama error - -commit 112ce08b41727f68c7650d8b56c7a35829b5d49b -Merge: b87796c cce2b0b -Author: Daniel (Shijun) Qian -Date: Sat Jul 7 14:37:01 2018 +0800 - - Merge pull request #39 from piclemx/master - - Adding filter for groups - -commit 0640680ff004512db7da5e14bed0d5919daef38d -Author: Georgy Buranov -Date: Mon Jun 18 16:58:34 2018 +0200 - - Check for sarama error - -commit cce2b0be679af0ee9829a2d01f59ffdf8c9844d6 -Author: Alexandre Picard-Lemieux -Date: Tue Jun 5 11:34:07 2018 -0400 - - Fix flag description - -commit 248eb8c35a93029ab7bd6ea1dfef9a5dffba8594 -Author: Alexandre Picard-Lemieux -Date: Tue Jun 5 11:26:02 2018 -0400 - - Adding filter for groups - -commit b87796c32ab4376fe6d47fb17df6a41cca21b046 -Merge: 84cee7e 6f5328f -Author: Daniel (Shijun) Qian -Date: Mon May 21 16:55:04 2018 +0800 - - Merge pull request #34 from 
chenwumail/master - - Create grafana dashboard (Prometheus) - -commit 6f5328fe3224d4aff093fdf1cad35a0d1e5125e6 -Author: chenwumail <31205300+chenwumail@users.noreply.github.com> -Date: Sat May 19 10:46:58 2018 +0800 - - add kafka_export_overview demo picture - -commit e40175cb71faecb34a5dd2589c937adfeda92767 -Author: chenwumail <31205300+chenwumail@users.noreply.github.com> -Date: Sat May 19 10:32:53 2018 +0800 - - Create grafana dashboard (Prometheus) - -commit 84cee7e0672f0161c05a93557cc2794a48c8a024 -Author: danielqsj -Date: Sat Apr 7 20:13:17 2018 +0800 - - Update pic - -commit e3f66e2905895e7e8bd6d8af6ee7379f5c867472 -Merge: ade40b4 5a76c64 -Author: Daniel (Shijun) Qian -Date: Sat Apr 7 20:11:01 2018 +0800 - - Merge pull request #26 from danielqsj/fix - - Update pic - -commit 5a76c64442509c6a898a243443d50bd37b9522ca -Author: danielqsj -Date: Sat Apr 7 20:09:28 2018 +0800 - - Update pic - -commit ade40b4bfb38f751388524c825c33cafc13976d4 -Merge: 47a04e7 7cb402a -Author: Daniel (Shijun) Qian -Date: Sat Apr 7 20:06:26 2018 +0800 - - Merge pull request #25 from danielqsj/fix - - Enhancement - -commit 7cb402a8d60b5b060499036f4ecd1b3f4f5f3347 -Author: danielqsj -Date: Sat Apr 7 20:02:35 2018 +0800 - - Release 1.1.0 - -commit ab154640fce55d9b5cdc53dc22b3bbe8c344936e -Author: danielqsj -Date: Sat Apr 7 17:44:37 2018 +0800 - - Accelerate metrics collecting - -commit fc92a2eae436621a4399a9c7b85f64360469ed0d -Author: danielqsj -Date: Sat Apr 7 16:48:13 2018 +0800 - - Fast return if error when get topics - -commit 1fb0d449508a6d832b0263d88d50b8f75a1f0031 -Author: danielqsj -Date: Sat Apr 7 16:25:56 2018 +0800 - - Collect consumer offset from alive brokers - -commit 5d63af095c72d59af931b4211a1603a03161a71f -Author: danielqsj -Date: Sat Apr 7 16:05:48 2018 +0800 - - Support inactive consumer group offset and lag - -commit ca0094286ffad3b2fad35d7fadef6280632d87a4 -Author: danielqsj -Date: Fri Apr 6 20:38:21 2018 +0800 - - Remove unused variable - -commit 
47a04e76e00acc082c63bf988140362b6e0a917d -Merge: 501fb60 1cbb590 -Author: Daniel (Shijun) Qian -Date: Fri Apr 6 15:35:58 2018 +0800 - - Merge pull request #24 from danielqsj/update-vendor - - Update vendor - -commit 1cbb590176de488baa0e60ead6f82920c539b928 -Author: danielqsj -Date: Fri Apr 6 15:32:12 2018 +0800 - - Remove redundant alias - -commit c005f471dcfd8db9172729555dc906f5c3f03ab0 -Author: danielqsj -Date: Fri Apr 6 15:30:14 2018 +0800 - - Update sarama to 1.16.0 - -commit 501fb604a53fda51c9ed428c47b1f2318a3677c7 -Merge: 135c2b6 83d5579 -Author: Daniel (Shijun) Qian -Date: Mon Apr 2 09:43:40 2018 +0800 - - Merge pull request #21 from gpaggi/master - - Make Kafka version configurable + fix logging - -commit 83d55792c2c9f4fc10be4a4e56c87b653a4a015c -Merge: be4dc1c f44f172 -Author: Gabriele Paggi -Date: Fri Mar 30 14:23:41 2018 +0200 - - Merge pull request #2 from Crypto89/master - - Fix memoryleak in go-metrics - -commit f44f1724cb243345c9aec3d42a6d3a03bdbfa8f0 -Author: Jorn Wijnands -Date: Fri Mar 30 14:20:46 2018 +0200 - - Remove pprof - -commit fffced7cd57ffa68ed054d8df8ea1296f1d9fef8 -Author: Jorn Wijnands -Date: Fri Mar 30 14:18:02 2018 +0200 - - Fix memory leak in go-metrics - -commit be4dc1c3b33f53a0bb7318ffc9298effb07bc0f7 -Author: Gabriele Paggi -Date: Sat Mar 17 17:22:56 2018 +0100 - - Release 1.0.2 - -commit 42042a812dca400eec8b6b2f40b23605df4bb4fa -Merge: 3ea1455 df19c87 -Author: Gabriele Paggi -Date: Thu Mar 15 14:18:33 2018 +0100 - - Merge pull request #1 from Crypto89/master - - Make kafka version configurable - -commit df19c87d3c6ea1310de824e738dc9bf9505b53c3 -Author: Jorn Wijnands -Date: Thu Mar 15 14:14:45 2018 +0100 - - Bump version - -commit 9f7cdeb6aa6b2d2eb913f00047ea021e398c7d29 -Author: Jorn Wijnands -Date: Thu Mar 15 14:14:03 2018 +0100 - - Make kafka version configurable - -commit 3ea1455d737c5e2c3a5b7fe8f1287e76fd18d66d -Author: Gabriele Paggi -Date: Thu Mar 15 11:52:46 2018 +0100 - - Version bump for release - -commit 
135c2b6b456ba7d6fcc124145db0d4036cfc9b01 -Merge: f0203b6 ada1715 -Author: Daniel (Shijun) Qian -Date: Mon Mar 5 11:28:18 2018 +0800 - - Merge pull request #17 from gpaggi/master - - No offset associated with a topic-partition under that consumer group - -commit f0203b61d2b3fbce53595550a6cd84d59d1146d1 -Merge: acb4af9 fb848e3 -Author: Daniel (Shijun) Qian -Date: Mon Mar 5 11:25:26 2018 +0800 - - Merge pull request #19 from daveworth/cleanup/use-integer-formats-in-logging - - use integer format specifications for partitions - -commit acb4af98b9c269cb4d5b9dadce04a9c4ffff975b -Merge: b5b6ff7 5c60ff5 -Author: Daniel (Shijun) Qian -Date: Mon Mar 5 11:23:36 2018 +0800 - - Merge pull request #18 from daveworth/bugfix/typo-in-offset-error-message - - fix typo in offset-fetch error message - -commit fb848e32933549a923da326847e5c7ff8dc0545c -Author: David Worth -Date: Thu Mar 1 14:17:50 2018 -0700 - - use integer format specifications for partitions - - One of the metalinters was complaining about this and it will - make the semi-confusing logs more obvious. 
- -commit 5c60ff5f3d66aaf62042286af5cf29a4b16b6cca -Author: David Worth -Date: Thu Mar 1 13:55:44 2018 -0700 - - fix typo in offset-fetch error message - -commit ada171570872b28b2bcf0358dc671460f8dd9e17 -Author: Gabriele Paggi -Date: Thu Mar 1 16:58:50 2018 +0100 - - Cover the case where Kafka will return offset -1, and no error, if there is no offset associated with a topic-partition under that consumer group - -commit b5b6ff7dd341599a069d9e3c2abe41556a055fc8 -Merge: f1639a6 6d9e3d4 -Author: Daniel (Shijun) Qian -Date: Mon Feb 12 15:23:29 2018 +0800 - - Merge pull request #15 from ekarak/patch-1 - - guard against empty replicas list - -commit 6d9e3d4c9241990145cb86193ccd07218b09466e -Author: Elias Karakoulakis -Date: Mon Feb 12 09:17:02 2018 +0200 - - guard against empty replicas list - -commit f1639a649ebcfe11bce6782ab281a59d05b6d9e9 -Author: danielqsj -Date: Fri Jan 12 21:03:06 2018 +0800 - - Release 1.0.1 - -commit cd611565df887b871d719e66362a0254c4886e5e -Author: danielqsj -Date: Fri Jan 12 17:51:05 2018 +0800 - - Fix bug of sasl handshake - -commit 91bcf3282f7dc4d501ab1c8540e7702d93126bc4 -Author: danielqsj -Date: Mon Jan 8 23:02:46 2018 +0800 - - Release 1.0.0 - -commit b647e3a719767823c01d68985b6b2e6798bad06a -Author: danielqsj -Date: Mon Jan 8 22:59:38 2018 +0800 - - Update doc - -commit 850362bf9a0ecedc20aac912ac50d54bf00ef0e8 -Author: danielqsj -Date: Mon Jan 8 22:56:29 2018 +0800 - - Update doc - -commit 5eddc28357274fe2fadfb2ebf2c5f3834e975a1b -Author: danielqsj -Date: Mon Jan 8 22:55:08 2018 +0800 - - Update doc - -commit 83d04a06c2ae3d9262a40c5c225909e142762f9c -Author: danielqsj -Date: Mon Jan 8 22:53:02 2018 +0800 - - Update doc - -commit 1794a853b9c23caba6975b8ea5a5ba24d944d972 -Author: danielqsj -Date: Mon Jan 8 22:48:42 2018 +0800 - - Add help to kingpin - -commit cb1e40cfddfaf4f1e0f10a37482069773c2bd6f6 -Author: danielqsj -Date: Mon Jan 8 20:57:01 2018 +0800 - - Format variable - -commit 4d65832d36a07e1893b2a9ab156972e55091c128 -Author: 
danielqsj -Date: Mon Jan 8 18:20:08 2018 +0800 - - Update dockerfile - -commit a7ad65fa2944d589fccf9ec6baf3f3bea5ce5010 -Author: danielqsj -Date: Mon Jan 8 18:17:53 2018 +0800 - - Update param help - -commit af143b31cff38265c67f36050787519e4c67e56d -Author: danielqsj -Date: Mon Jan 8 18:16:03 2018 +0800 - - Enhance tls - -commit 83dfac7a5ff7ca0c4e08f60095f07d2d704e4a24 -Author: danielqsj -Date: Mon Jan 8 17:10:09 2018 +0800 - - Update param - -commit 85357f37e590af2e7e6e09ab91f4bf4e6fe1bf43 -Author: danielqsj -Date: Mon Jan 8 17:00:52 2018 +0800 - - Add option to turn down SASL handshake - -commit 68a28ebb90d431822ace8910daf96794f48b2788 -Author: danielqsj -Date: Mon Jan 8 14:10:58 2018 +0800 - - Change sarama client id - -commit dd0713e6da9c5b44ec9588f4137b71474722684f -Author: danielqsj -Date: Mon Jan 8 14:07:42 2018 +0800 - - Add log.level and log.enableSarama - -commit 74b5048b8107e2e297d972506567f1589e44ab84 -Author: danielqsj -Date: Mon Jan 8 13:59:13 2018 +0800 - - Add option to enable sarama logging - -commit da5a4975eab59b565f186f3ac998df62c57f18fd -Author: danielqsj -Date: Mon Jan 8 10:35:36 2018 +0800 - - Update sarama to v1.15.0 - -commit 73401c7357187dec15d95bceea70c02f7ebaa2e1 -Merge: bccac40 98d9ef0 -Author: Daniel (Shijun) Qian -Date: Thu Jan 4 09:40:03 2018 +0800 - - Merge pull request #5 from gpaggi/master - - Add TLS support and fix concurrency issue while updating struct - -commit 98d9ef07f6d359e43160c5a9c9ef7f4ff838b8ba -Author: Gabriele Paggi -Date: Wed Jan 3 20:08:34 2018 +0100 - - Configure version when initializing the client. 
If done later on it overrides the whole config, wiping the SASL configuration, if any - -commit b1f7575f47d09cb02fcc83c8ebe31fe36abf478c -Author: Gabriele Paggi -Date: Tue Jan 2 16:12:02 2018 +0100 - - Fix concurrency issue - -commit 109595308407ec1877dca13437586d45d7ffb671 -Author: Gabriele Paggi -Date: Tue Jan 2 12:01:56 2018 +0100 - - Add support for SSL - -commit bccac4045e055ce97a7407a0bde5dcffa317e7a7 -Author: danielqsj -Date: Fri Dec 22 10:27:19 2017 +0800 - - Update doc - -commit 77161033720d842d22e52a02250934ca83a3c42c -Author: danielqsj -Date: Wed Dec 6 16:59:37 2017 +0800 - - Release 0.3.0 - -commit efb379bec9fbc5457ce5efcd152af4cbf111da3f -Author: danielqsj -Date: Wed Dec 6 16:34:36 2017 +0800 - - Update doc - -commit c6ee116579491380a0a04feb1022d6bf75f868af -Author: danielqsj -Date: Wed Dec 6 16:28:22 2017 +0800 - - Update dependency constraint - -commit d045495d87d04e9019d89b2b145ae99ed896544b -Author: danielqsj -Date: Wed Dec 6 16:26:44 2017 +0800 - - Update doc - -commit b9aba6b3361cadd1bb05ec08c6c3cc11bc73695f -Author: danielqsj -Date: Wed Dec 6 16:24:51 2017 +0800 - - Update param description - -commit 4ad3090574d2318e81326f253106c51e44b6289b -Author: danielqsj -Date: Wed Dec 6 16:02:04 2017 +0800 - - Close sarama client when exit - -commit 10003b6dd108b70308d108d45dfaf9cedd0a1f88 -Author: danielqsj -Date: Wed Dec 6 15:57:58 2017 +0800 - - Fix topic sync - -commit fbcdb27c6c9cfb69c9d8dfd93ccbd9932afa3ea1 -Author: danielqsj -Date: Wed Dec 6 15:19:36 2017 +0800 - - Add support for topic filter - -commit cc40763808089bac02ee46d34514801904d0e4a4 -Author: danielqsj -Date: Wed Dec 6 14:23:09 2017 +0800 - - set default value for sasl.enabled - -commit e0a817d8b949b0f28707d21cccf87c72cdfa070b -Author: danielqsj -Date: Wed Dec 6 14:19:54 2017 +0800 - - Update sarama to 1.14.0 - -commit 3cca26acc510201e4e62b9af802460c0a88270e3 -Author: danielqsj -Date: Wed Dec 6 14:01:29 2017 +0800 - - Update gitignore and ci config - -commit 
019a4322d8446cc562f55f96ae78fbc8eb4fad98 -Merge: 9aecc7d 9c294a9 -Author: Daniel (Shijun) Qian -Date: Sun Dec 3 19:22:23 2017 -0600 - - Merge pull request #3 from wakeful/feature/add_sasl_support - - add support for configuring SASL user & passwd - -commit 9c294a976ddd4075f0cbaf9b1e842c3d88560409 -Author: AJ -Date: Tue Nov 21 18:55:02 2017 +0000 - - add support for configuring SASL user & passwd - -commit 9aecc7d5965edf2230f8eb6992103396dfc11041 -Author: danielqsj -Date: Fri Oct 20 15:40:02 2017 +0800 - - release 0.2.0 - -commit 67070af539a0d4c41fb09b24879871fc1456b00f -Author: danielqsj -Date: Fri Oct 20 14:53:44 2017 +0800 - - Change port to 9308 and accept multiple addresses - -commit 7576d6aa0abd77ae33c982936fac83ec273ce099 -Author: danielqsj -Date: Thu Oct 19 17:44:07 2017 +0800 - - Update CI - -commit f8f300d1d00661a7178b6f830b01417f01c66e61 -Author: danielqsj -Date: Thu Oct 19 17:15:22 2017 +0800 - - Update CI - -commit d2892ccf7ed1ee96888b64ebb10b164f4bfa4400 -Author: danielqsj -Date: Thu Oct 19 16:31:50 2017 +0800 - - Update CI - -commit bf59918794f3c7261a93e4f4a2f51a03456bebbc -Author: danielqsj -Date: Thu Oct 19 16:29:42 2017 +0800 - - Update CI - -commit a06c140cd4c8591a9f04e5e7d93ca7d4801d064c -Author: danielqsj -Date: Thu Oct 19 16:25:32 2017 +0800 - - Update doc - -commit 5cf1d2f6ef73dfb1619719caf4b406c77a1837aa -Author: danielqsj -Date: Thu Oct 19 15:02:42 2017 +0800 - - Improve CI - -commit cf1e830c66812521505d6df8ebf4cf23422b7b93 -Author: danielqsj -Date: Thu Oct 19 14:25:27 2017 +0800 - - Update doc - -commit 2721ee48b6fac14bc070d9e5eaa1fba2738af16a -Author: danielqsj -Date: Thu Oct 19 14:21:47 2017 +0800 - - Add ci - -commit fc17588dfbc59191f20ee3e2b3354cf97827be6a -Author: danielqsj -Date: Thu Oct 19 14:18:34 2017 +0800 - - Update README.md - -commit 8a835b17e323a4f60a41ef73ca2f8d58663daec4 -Author: danielqsj -Date: Thu Oct 19 14:06:39 2017 +0800 - - Update README.md - -commit 666588248557814e069d1a0a9e7ce9e72864e505 -Author: danielqsj -Date: 
Thu Oct 19 14:01:29 2017 +0800 - - Format name of metrics - -commit a4fcd7b7be7ad06a9e327b299209bd8596efd47c -Author: Daniel (Shijun) Qian -Date: Wed Oct 18 17:55:01 2017 +0800 - - Update README.md - -commit ceaaee5e2da2af8754e282f64f2cd6bd1bcfcedb -Author: Daniel (Shijun) Qian -Date: Wed Oct 18 17:49:49 2017 +0800 - - Update README.md - -commit 41f39ada36ae297dfe7696497af673d7709d7812 -Author: Daniel (Shijun) Qian -Date: Wed Oct 18 17:46:42 2017 +0800 - - Update README.md - -commit 14bfbe1e4ec3b376825423acec777109345a4cd5 -Author: danielqsj -Date: Wed Oct 18 15:29:45 2017 +0800 - - Update 0.1.0 - -commit 36ddbe453c51c26bc8d99fd6b6575e12fc46351f -Author: Daniel (Shijun) Qian -Date: Fri Sep 15 11:23:21 2017 +0800 - - Initial commit diff --git a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md b/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md deleted file mode 100644 index b0f107cb..00000000 --- a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing - -Contributions are always welcome, both reporting issues and submitting pull requests! - -### Reporting issues - -Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth. - -- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version. -- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description. -- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it. - -Also, please include the following information about your environment, so we can help you faster: - -- What version of Kafka are you using? -- What version of Go are you using? -- What are the values of your Producer/Consumer/Client configuration? 
- - -### Submitting pull requests - -We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following. - -- If you plan to work on something major, please open an issue to discuss the design first. -- Don't break backwards compatibility. If you really have to, open an issue to discuss this first. -- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving. -- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs. -- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`.You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors. -- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems. -- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions. -- Make sure your code is supported by all the Go versions we support. 
You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions diff --git a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md b/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 7ccafb62..00000000 --- a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,20 +0,0 @@ -##### Versions - -*Please specify real version numbers or git SHAs, not just "Latest" since that changes fairly regularly.* -Sarama Version: -Kafka Version: -Go Version: - -##### Configuration - -What configuration values are you using for Sarama and Kafka? - -##### Logs - -When filing an issue please provide logs from Sarama and Kafka if at all -possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug -output. - -##### Problem Description - - diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore index 6e362e4f..2c9adc20 100644 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -25,3 +25,5 @@ _testmain.go coverage.txt profile.out + +simplest-uncommitted-msg-0.1-jar-with-dependencies.jar diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml new file mode 100644 index 00000000..ce2b5230 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -0,0 +1,77 @@ +run: + timeout: 5m + deadline: 10m + +linters-settings: + govet: + check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 99 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + goimports: + local-prefixes: github.com/Shopify/sarama + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - wrapperFunc + - ifElseChain + funlen: + lines: 300 + statements: 300 + +linters: + disable-all: true + 
enable: + - bodyclose + - deadcode + - depguard + - dogsled + # - dupl + - errcheck + - funlen + # - gocritic + - gocyclo + - gofmt + - goimports + # - golint + - gosec + # - gosimple + - govet + # - ineffassign + - interfacer + # - misspell + # - nakedret + # - scopelint + # - staticcheck + - structcheck + # - stylecheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + # - goconst + - gochecknoinits + +issues: + exclude: + - consider giving a name to these results + - include an explanation for nolint directive + - Potential Integer overflow made by strconv.Atoi result conversion to int16/32 + - Use of weak random number generator + - TLS MinVersion too low diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml deleted file mode 100644 index fe694e57..00000000 --- a/vendor/github.com/Shopify/sarama/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go -go: -- 1.9.7 -- 1.10.4 -- 1.11 - -env: - global: - - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - - TOXIPROXY_ADDR=http://localhost:8474 - - KAFKA_INSTALL_ROOT=/home/travis/kafka - - KAFKA_HOSTNAME=localhost - - DEBUG=true - matrix: - - KAFKA_VERSION=1.0.0 - - KAFKA_VERSION=1.1.0 - - KAFKA_VERSION=2.0.0 - -before_install: -- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} -- vagrant/install_cluster.sh -- vagrant/boot_cluster.sh -- vagrant/create_topics.sh - -install: make install_dependencies - -script: -- make test -- make vet -- make errcheck -- if [ "$TRAVIS_GO_VERSION" = "1.11" ]; then make fmt; fi - -after_success: -- bash <(curl -s https://codecov.io/bash) - -after_script: vagrant/halt_cluster.sh diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index 47fb15b1..59ccd1de 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,5 +1,431 @@ # Changelog +#### Unreleased + +#### Version 
1.28.0 (2021-02-15) + +**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.** + +- #1870 - @kvch - Update Kerberos library to latest major +- #1876 - @bai - Update docs, reference pkg.go.dev +- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close +- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages +- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies +- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy +- #1862 - @bai - Fix CI setenv permissions issues +- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev +- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica + +#### Version 1.27.2 (2020-10-21) + +# Improvements + +#1750 - @krantideep95 Adds missing mock responses for mocking consumer group + +# Fixes + +#1817 - reverts #1785 - Add private method to Client interface to prevent implementation + +#### Version 1.27.1 (2020-10-07) + +# Improvements + +#1775 - @d1egoaz - Adds a Producer Interceptor example +#1781 - @justin-chen - Refresh brokers given list of seed brokers +#1784 - @justin-chen - Add randomize seed broker method +#1790 - @d1egoaz - remove example binary +#1798 - @bai - Test against Go 1.15 +#1785 - @justin-chen - Add private method to Client interface to prevent implementation +#1802 - @uvw - Support Go 1.13 error unwrapping + +# Fixes + +#1791 - @stanislavkozlovski - bump default version to 1.0.0 + +#### Version 1.27.0 (2020-08-11) + +# Improvements + +#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration +#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests +#1699 - @wclaeys - Consumer group support for manually comitting offsets +#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 +#1726 - @d1egoaz - Include zstd on the functional tests +#1730 - @d1egoaz - 
KIP-42 Add producer and consumer interceptors +#1738 - @varun06 - fixed variable names that are named same as some std lib package names +#1741 - @varun06 - updated zstd dependency to latest v1.10.10 +#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base +#1763 - @alrs - remove deprecated tls options from test +#1769 - @bai - Add support for Kafka 2.6.0 + +# Fixes + +#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +#1744 - @alrs - Fix isBalanced Function Signature + +#### Version 1.26.4 (2020-05-19) + +# Fixes + +- #1701 - @d1egoaz - Set server name only for the current broker +- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka + +#### Version 1.26.3 (2020-05-07) + +# Fixes + +- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config + +#### Version 1.26.2 (2020-05-06) + +# âš ï¸ Known Issues + +This release has been marked as not ready for production and may be unstable, please use v1.26.4. 
+ +# Improvements + +- #1560 - @iyacontrol - add sync pool for gzip 1-9 +- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID +- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs +- #1632 - @bai - Add support for Go 1.14 +- #1640 - @random-dwi - Feature/fix list partition reassignments +- #1646 - @mimaison - Add DescribeLogDirs to admin client +- #1667 - @bai - Add support for kafka 2.5.0 + +# Fixes + +- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 +- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine +- #1602 - @d1egoaz - adds a note about consumer groups Consume method +- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly +- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented +- #1614 - @alrs - produce_response.go: Remove Unused Functions +- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables +- #1639 - @agriffaut - Handle errors with no message but error code +- #1643 - @kzinglzy - fix `config.net.keepalive` +- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs +- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata +- #1650 - @lavoiesl - Return the response error in heartbeatLoop +- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die +- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. 
+ +#### Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) + +#### Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/Shopify/sarama/pull/1574), + [1582](https://github.com/Shopify/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/Shopify/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/Shopify/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/Shopify/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/Shopify/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/Shopify/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/Shopify/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/Shopify/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/Shopify/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/Shopify/sarama/pull/1582)). 
+- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/Shopify/sarama/pull/1586)). + +#### Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/Shopify/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/Shopify/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/Shopify/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/Shopify/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/Shopify/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/Shopify/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/Shopify/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/Shopify/sarama/pull/1545)). + +#### Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/Shopify/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/Shopify/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/Shopify/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/Shopify/sarama/pull/1529)). + +#### Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/Shopify/sarama/pull/1416)). +- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/Shopify/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/Shopify/sarama/pull/1415)). 
+- Set KafkaVersion in ListAcls method + ([1452](https://github.com/Shopify/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/Shopify/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/Shopify/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/Shopify/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/Shopify/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/Shopify/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/Shopify/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/Shopify/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/Shopify/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/Shopify/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/Shopify/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/Shopify/sarama/pull/1494)). + +Known Issues: +- Please **don't** use Zstd, as it doesn't work right now. + See https://github.com/Shopify/sarama/issues/1252 + +#### Version 1.23.1 (2019-07-22) + +Bug Fixes: +- Fix fetch delete bug record + ([1425](https://github.com/Shopify/sarama/pull/1425)). +- Handle SASL/OAUTHBEARER token rejection + ([1428](https://github.com/Shopify/sarama/pull/1428)). + +#### Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/Shopify/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/Shopify/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/Shopify/sarama/pull/1417)). 
+- Add support for SASLVersion configuration + ([1410](https://github.com/Shopify/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/Shopify/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/Shopify/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/Shopify/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/Shopify/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/Shopify/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/Shopify/sarama/pull/1373)). +- Support timeout when fetching metadata + ([1359](https://github.com/Shopify/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/Shopify/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/Shopify/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/Shopify/sarama/pull/1368)). + +#### Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/Shopify/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/Shopify/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/Shopify/sarama/pull/1353)). +- Use a different SCRAM client for each broker connection + ([1349](https://github.com/Shopify/sarama/pull/1349)). +- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 + ([1344](https://github.com/Shopify/sarama/pull/1344)). + +#### Version 1.22.0 (2019-04-09) + +New Features: +- Add Offline Replicas Operation to Client + ([1318](https://github.com/Shopify/sarama/pull/1318)). +- Allow using proxy when connecting to broker + ([1326](https://github.com/Shopify/sarama/pull/1326)). 
+- Implement ReadCommitted + ([1307](https://github.com/Shopify/sarama/pull/1307)). +- Add support for Kafka 2.2.0 + ([1331](https://github.com/Shopify/sarama/pull/1331)). +- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes + ([1331](https://github.com/Shopify/sarama/pull/1295)). + +Improvements: +- Unregister all broker metrics on broker stop + ([1232](https://github.com/Shopify/sarama/pull/1232)). +- Add SCRAM authentication example + ([1303](https://github.com/Shopify/sarama/pull/1303)). +- Add consumergroup examples + ([1304](https://github.com/Shopify/sarama/pull/1304)). +- Expose consumer batch size metric + ([1296](https://github.com/Shopify/sarama/pull/1296)). +- Add TLS options to console producer and consumer + ([1300](https://github.com/Shopify/sarama/pull/1300)). +- Reduce client close bookkeeping + ([1297](https://github.com/Shopify/sarama/pull/1297)). +- Satisfy error interface in create responses + ([1154](https://github.com/Shopify/sarama/pull/1154)). +- Please lint gods + ([1346](https://github.com/Shopify/sarama/pull/1346)). + +Bug Fixes: +- Fix multi consumer group instance crash + ([1338](https://github.com/Shopify/sarama/pull/1338)). +- Update lz4 to latest version + ([1347](https://github.com/Shopify/sarama/pull/1347)). +- Retry ErrNotCoordinatorForConsumer in new consumergroup session + ([1231](https://github.com/Shopify/sarama/pull/1231)). +- Fix cleanup error handler + ([1332](https://github.com/Shopify/sarama/pull/1332)). +- Fix rate condition in PartitionConsumer + ([1156](https://github.com/Shopify/sarama/pull/1156)). + +#### Version 1.21.0 (2019-02-24) + +New Features: +- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest + ([1236](https://github.com/Shopify/sarama/pull/1236)). +- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests + ([1178](https://github.com/Shopify/sarama/pull/1178)). +- Implement SASL/OAUTHBEARER + ([1240](https://github.com/Shopify/sarama/pull/1240)). 
+ +Improvements: +- Add Go mod support + ([1282](https://github.com/Shopify/sarama/pull/1282)). +- Add error codes 73—76 + ([1239](https://github.com/Shopify/sarama/pull/1239)). +- Add retry backoff function + ([1160](https://github.com/Shopify/sarama/pull/1160)). +- Maintain metadata in the producer even when retries are disabled + ([1189](https://github.com/Shopify/sarama/pull/1189)). +- Include ReplicaAssignment in ListTopics + ([1274](https://github.com/Shopify/sarama/pull/1274)). +- Add producer performance tool + ([1222](https://github.com/Shopify/sarama/pull/1222)). +- Add support LogAppend timestamps + ([1258](https://github.com/Shopify/sarama/pull/1258)). + +Bug Fixes: +- Fix potential deadlock when a heartbeat request fails + ([1286](https://github.com/Shopify/sarama/pull/1286)). +- Fix consuming compacted topic + ([1227](https://github.com/Shopify/sarama/pull/1227)). +- Set correct Kafka version for DescribeConfigsRequest v1 + ([1277](https://github.com/Shopify/sarama/pull/1277)). +- Update kafka test version + ([1273](https://github.com/Shopify/sarama/pull/1273)). + +#### Version 1.20.1 (2019-01-10) + +New Features: +- Add optional replica id in offset request + ([1100](https://github.com/Shopify/sarama/pull/1100)). + +Improvements: +- Implement DescribeConfigs Request + Response v1 & v2 + ([1230](https://github.com/Shopify/sarama/pull/1230)). +- Reuse compression objects + ([1185](https://github.com/Shopify/sarama/pull/1185)). +- Switch from png to svg for GoDoc link in README + ([1243](https://github.com/Shopify/sarama/pull/1243)). +- Fix typo in deprecation notice for FetchResponseBlock.Records + ([1242](https://github.com/Shopify/sarama/pull/1242)). +- Fix typos in consumer metadata response file + ([1244](https://github.com/Shopify/sarama/pull/1244)). + +Bug Fixes: +- Revert to individual msg retries for non-idempotent + ([1203](https://github.com/Shopify/sarama/pull/1203)). 
+- Respect MaxMessageBytes limit for uncompressed messages + ([1141](https://github.com/Shopify/sarama/pull/1141)). + +#### Version 1.20.0 (2018-12-10) + +New Features: + - Add support for zstd compression + ([#1170](https://github.com/Shopify/sarama/pull/1170)). + - Add support for Idempotent Producer + ([#1152](https://github.com/Shopify/sarama/pull/1152)). + - Add support support for Kafka 2.1.0 + ([#1229](https://github.com/Shopify/sarama/pull/1229)). + - Add support support for OffsetCommit request/response pairs versions v1 to v5 + ([#1201](https://github.com/Shopify/sarama/pull/1201)). + - Add support support for OffsetFetch request/response pair up to version v5 + ([#1198](https://github.com/Shopify/sarama/pull/1198)). + +Improvements: + - Export broker's Rack setting + ([#1173](https://github.com/Shopify/sarama/pull/1173)). + - Always use latest patch version of Go on CI + ([#1202](https://github.com/Shopify/sarama/pull/1202)). + - Add error codes 61 to 72 + ([#1195](https://github.com/Shopify/sarama/pull/1195)). + +Bug Fixes: + - Fix build without cgo + ([#1182](https://github.com/Shopify/sarama/pull/1182)). + - Fix go vet suggestion in consumer group file + ([#1209](https://github.com/Shopify/sarama/pull/1209)). + - Fix typos in code and comments + ([#1228](https://github.com/Shopify/sarama/pull/1228)). + #### Version 1.19.0 (2018-09-27) New Features: diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index 8fcf219f..a0586348 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -1,30 +1,31 @@ -default: fmt vet errcheck test +default: fmt get update test lint -# Taken from https://github.com/codecov/example-go#caveat-multiple-files -test: - echo "" > coverage.txt - for d in `go list ./... 
| grep -v vendor`; do \ - go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi \ - done - -vet: - go vet ./... - -# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg -errcheck: - errcheck -ignorepkg fmt github.com/Shopify/sarama/... +GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go +GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) +GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic + +FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') +TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') + +get: + $(GO) get ./... + $(GO) mod verify + $(GO) mod tidy + +update: + $(GO) get -u -v all + $(GO) mod verify + $(GO) mod tidy fmt: - @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi + gofmt -s -l -w $(FILES) $(TESTS) -install_dependencies: install_errcheck get +lint: + GOFLAGS="-tags=functional" golangci-lint run -install_errcheck: - go get github.com/kisielk/errcheck +test: + $(GOTEST) ./... -get: - go get -t +.PHONY: test_functional +test_functional: + $(GOTEST) -tags=functional ./... 
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index b9970938..c57c3a78 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -1,39 +1,36 @@ -sarama -====== +# sarama -[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) +[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) [![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) [![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). -### Getting started +## Getting started -- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). -### Compatibility and API stability +## Compatibility and API stability Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. 
This means we currently officially support -Go 1.8 through 1.11, and Kafka 1.0 through 2.0, although older releases are +Go 1.14 through 1.15, and Kafka 2.5 through 2.7, although older releases are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. A changelog is available [here](CHANGELOG.md). -### Contributing +## Contributing -* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). -* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more - technical and design details. -* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) - contains a wealth of useful information. -* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. -* If you have any questions, just ask! +- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. +- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. +- If you have any questions, just ask! diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile index f4b848a3..07d7ffb8 100644 --- a/vendor/github.com/Shopify/sarama/Vagrantfile +++ b/vendor/github.com/Shopify/sarama/Vagrantfile @@ -1,14 +1,8 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. 
Don't touch unless you know what you're doing! -VAGRANTFILE_API_VERSION = "2" - # We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB MEMORY = 3072 -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "ubuntu/trusty64" +Vagrant.configure("2") do |config| + config.vm.box = "ubuntu/bionic64" config.vm.provision :shell, path: "vagrant/provision.sh" diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go index 51517359..50b689d1 100644 --- a/vendor/github.com/Shopify/sarama/acl_bindings.go +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -1,17 +1,27 @@ package sarama +//Resource holds information about acl resource type type Resource struct { - ResourceType AclResourceType - ResourceName string + ResourceType AclResourceType + ResourceName string + ResourcePatternType AclResourcePatternType } -func (r *Resource) encode(pe packetEncoder) error { +func (r *Resource) encode(pe packetEncoder, version int16) error { pe.putInt8(int8(r.ResourceType)) if err := pe.putString(r.ResourceName); err != nil { return err } + if version == 1 { + if r.ResourcePatternType == AclPatternUnknown { + Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead") + r.ResourcePatternType = AclPatternLiteral + } + pe.putInt8(int8(r.ResourcePatternType)) + } + return nil } @@ -25,10 +35,18 @@ func (r *Resource) decode(pd packetDecoder, version int16) (err error) { if r.ResourceName, err = pd.getString(); err != nil { return err } + if version == 1 { + pattern, err := pd.getInt8() + if err != nil { + return err + } + r.ResourcePatternType = AclResourcePatternType(pattern) + } return nil } +//Acl holds information about acl type type Acl struct { Principal string Host string @@ -75,13 +93,14 @@ func (a *Acl) decode(pd packetDecoder, version int16) (err error) { return nil } +//ResourceAcls is an acl resource type type ResourceAcls struct { Resource Acls []*Acl } -func 
(r *ResourceAcls) encode(pe packetEncoder) error { - if err := r.Resource.encode(pe); err != nil { +func (r *ResourceAcls) encode(pe packetEncoder, version int16) error { + if err := r.Resource.encode(pe, version); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go index 0b6ecbec..6d8a70e1 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_request.go +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -1,6 +1,8 @@ package sarama +//CreateAclsRequest is an acl creation request type CreateAclsRequest struct { + Version int16 AclCreations []*AclCreation } @@ -10,7 +12,7 @@ func (c *CreateAclsRequest) encode(pe packetEncoder) error { } for _, aclCreation := range c.AclCreations { - if err := aclCreation.encode(pe); err != nil { + if err := aclCreation.encode(pe, c.Version); err != nil { return err } } @@ -19,6 +21,7 @@ func (c *CreateAclsRequest) encode(pe packetEncoder) error { } func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { + c.Version = version n, err := pd.getArrayLength() if err != nil { return err @@ -36,25 +39,35 @@ func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) return nil } -func (d *CreateAclsRequest) key() int16 { +func (c *CreateAclsRequest) key() int16 { return 30 } -func (d *CreateAclsRequest) version() int16 { - return 0 +func (c *CreateAclsRequest) version() int16 { + return c.Version } -func (d *CreateAclsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 +func (c *CreateAclsRequest) headerVersion() int16 { + return 1 } +func (c *CreateAclsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +//AclCreation is a wrapper around Resource and Acl type type AclCreation struct { Resource Acl } -func (a *AclCreation) encode(pe packetEncoder) error { - if err := a.Resource.encode(pe); err != 
nil { +func (a *AclCreation) encode(pe packetEncoder, version int16) error { + if err := a.Resource.encode(pe, version); err != nil { return err } if err := a.Acl.encode(pe); err != nil { diff --git a/vendor/github.com/Shopify/sarama/acl_create_request_test.go b/vendor/github.com/Shopify/sarama/acl_create_request_test.go deleted file mode 100644 index fb4b35c1..00000000 --- a/vendor/github.com/Shopify/sarama/acl_create_request_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package sarama - -import "testing" - -var ( - aclCreateRequest = []byte{ - 0, 0, 0, 1, - 3, // resource type = group - 0, 5, 'g', 'r', 'o', 'u', 'p', - 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', - 0, 4, 'h', 'o', 's', 't', - 2, // all - 2, // deny - } -) - -func TestCreateAclsRequest(t *testing.T) { - req := &CreateAclsRequest{ - AclCreations: []*AclCreation{{ - Resource: Resource{ - ResourceType: AclResourceGroup, - ResourceName: "group", - }, - Acl: Acl{ - Principal: "principal", - Host: "host", - Operation: AclOperationAll, - PermissionType: AclPermissionDeny, - }}, - }, - } - - testRequest(t, "create request", req, aclCreateRequest) -} diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go index 8a56f357..14b1b9e1 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_response.go +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -2,6 +2,7 @@ package sarama import "time" +//CreateAclsResponse is a an acl response creation type type CreateAclsResponse struct { ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse @@ -46,18 +47,23 @@ func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) return nil } -func (d *CreateAclsResponse) key() int16 { +func (c *CreateAclsResponse) key() int16 { return 30 } -func (d *CreateAclsResponse) version() int16 { +func (c *CreateAclsResponse) version() int16 { return 0 } -func (d *CreateAclsResponse) requiredVersion() KafkaVersion { 
+func (c *CreateAclsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } +//AclCreationResponse is an acl creation response type type AclCreationResponse struct { Err KError ErrMsg *string diff --git a/vendor/github.com/Shopify/sarama/acl_create_response_test.go b/vendor/github.com/Shopify/sarama/acl_create_response_test.go deleted file mode 100644 index 65b934d9..00000000 --- a/vendor/github.com/Shopify/sarama/acl_create_response_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - createResponseWithError = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 1, - 0, 42, - 0, 5, 'e', 'r', 'r', 'o', 'r', - } - - createResponseArray = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 2, - 0, 42, - 0, 5, 'e', 'r', 'r', 'o', 'r', - 0, 0, - 255, 255, - } -) - -func TestCreateAclsResponse(t *testing.T) { - errmsg := "error" - resp := &CreateAclsResponse{ - ThrottleTime: 100 * time.Millisecond, - AclCreationResponses: []*AclCreationResponse{{ - Err: ErrInvalidRequest, - ErrMsg: &errmsg, - }}, - } - - testResponse(t, "response with error", resp, createResponseWithError) - - resp.AclCreationResponses = append(resp.AclCreationResponses, new(AclCreationResponse)) - - testResponse(t, "response array", resp, createResponseArray) -} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go index 4133dcea..41525225 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_request.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -1,6 +1,8 @@ package sarama +//DeleteAclsRequest is a delete acl request type DeleteAclsRequest struct { + Version int Filters []*AclFilter } @@ -10,6 +12,7 @@ func (d *DeleteAclsRequest) encode(pe packetEncoder) error { } for _, filter := range d.Filters { + filter.Version = d.Version if err := filter.encode(pe); err != nil { return err } @@ -19,6 +22,7 @@ func (d 
*DeleteAclsRequest) encode(pe packetEncoder) error { } func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) n, err := pd.getArrayLength() if err != nil { return err @@ -27,6 +31,7 @@ func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) d.Filters = make([]*AclFilter, n) for i := 0; i < n; i++ { d.Filters[i] = new(AclFilter) + d.Filters[i].Version = int(version) if err := d.Filters[i].decode(pd, version); err != nil { return err } @@ -40,9 +45,18 @@ func (d *DeleteAclsRequest) key() int16 { } func (d *DeleteAclsRequest) version() int16 { - return 0 + return int16(d.Version) +} + +func (c *DeleteAclsRequest) headerVersion() int16 { + return 1 } func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request_test.go b/vendor/github.com/Shopify/sarama/acl_delete_request_test.go deleted file mode 100644 index 2efdcb48..00000000 --- a/vendor/github.com/Shopify/sarama/acl_delete_request_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package sarama - -import "testing" - -var ( - aclDeleteRequestNulls = []byte{ - 0, 0, 0, 1, - 1, - 255, 255, - 255, 255, - 255, 255, - 11, - 3, - } - - aclDeleteRequest = []byte{ - 0, 0, 0, 1, - 1, // any - 0, 6, 'f', 'i', 'l', 't', 'e', 'r', - 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', - 0, 4, 'h', 'o', 's', 't', - 4, // write - 3, // allow - } - - aclDeleteRequestArray = []byte{ - 0, 0, 0, 2, - 1, - 0, 6, 'f', 'i', 'l', 't', 'e', 'r', - 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', - 0, 4, 'h', 'o', 's', 't', - 4, // write - 3, // allow - 2, - 0, 5, 't', 'o', 'p', 'i', 'c', - 255, 255, - 255, 255, - 6, - 2, - } -) - -func TestDeleteAclsRequest(t *testing.T) { - req := &DeleteAclsRequest{ - Filters: []*AclFilter{{ - ResourceType: AclResourceAny, - Operation: AclOperationAlterConfigs, - 
PermissionType: AclPermissionAllow, - }}, - } - - testRequest(t, "delete request nulls", req, aclDeleteRequestNulls) - - req.Filters[0].ResourceName = nullString("filter") - req.Filters[0].Principal = nullString("principal") - req.Filters[0].Host = nullString("host") - req.Filters[0].Operation = AclOperationWrite - - testRequest(t, "delete request", req, aclDeleteRequest) - - req.Filters = append(req.Filters, &AclFilter{ - ResourceType: AclResourceTopic, - ResourceName: nullString("topic"), - Operation: AclOperationDelete, - PermissionType: AclPermissionDeny, - }) - - testRequest(t, "delete request array", req, aclDeleteRequestArray) -} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go index b5e1c45e..cb630882 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_response.go +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -2,20 +2,22 @@ package sarama import "time" +//DeleteAclsResponse is a delete acl response type DeleteAclsResponse struct { + Version int16 ThrottleTime time.Duration FilterResponses []*FilterResponse } -func (a *DeleteAclsResponse) encode(pe packetEncoder) error { - pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) +func (d *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) - if err := pe.putArrayLength(len(a.FilterResponses)); err != nil { + if err := pe.putArrayLength(len(d.FilterResponses)); err != nil { return err } - for _, filterResponse := range a.FilterResponses { - if err := filterResponse.encode(pe); err != nil { + for _, filterResponse := range d.FilterResponses { + if err := filterResponse.encode(pe, d.Version); err != nil { return err } } @@ -23,22 +25,22 @@ func (a *DeleteAclsResponse) encode(pe packetEncoder) error { return nil } -func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { +func (d *DeleteAclsResponse) decode(pd packetDecoder, version 
int16) (err error) { throttleTime, err := pd.getInt32() if err != nil { return err } - a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond n, err := pd.getArrayLength() if err != nil { return err } - a.FilterResponses = make([]*FilterResponse, n) + d.FilterResponses = make([]*FilterResponse, n) for i := 0; i < n; i++ { - a.FilterResponses[i] = new(FilterResponse) - if err := a.FilterResponses[i].decode(pd, version); err != nil { + d.FilterResponses[i] = new(FilterResponse) + if err := d.FilterResponses[i].decode(pd, version); err != nil { return err } } @@ -51,6 +53,10 @@ func (d *DeleteAclsResponse) key() int16 { } func (d *DeleteAclsResponse) version() int16 { + return d.Version +} + +func (d *DeleteAclsResponse) headerVersion() int16 { return 0 } @@ -58,13 +64,14 @@ func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } +//FilterResponse is a filter response type type FilterResponse struct { Err KError ErrMsg *string MatchingAcls []*MatchingAcl } -func (f *FilterResponse) encode(pe packetEncoder) error { +func (f *FilterResponse) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(f.Err)) if err := pe.putNullableString(f.ErrMsg); err != nil { return err @@ -74,7 +81,7 @@ func (f *FilterResponse) encode(pe packetEncoder) error { return err } for _, matchingAcl := range f.MatchingAcls { - if err := matchingAcl.encode(pe); err != nil { + if err := matchingAcl.encode(pe, version); err != nil { return err } } @@ -108,6 +115,7 @@ func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { return nil } +//MatchingAcl is a matching acl type type MatchingAcl struct { Err KError ErrMsg *string @@ -115,13 +123,13 @@ type MatchingAcl struct { Acl } -func (m *MatchingAcl) encode(pe packetEncoder) error { +func (m *MatchingAcl) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(m.Err)) if err := pe.putNullableString(m.ErrMsg); 
err != nil { return err } - if err := m.Resource.encode(pe); err != nil { + if err := m.Resource.encode(pe, version); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response_test.go b/vendor/github.com/Shopify/sarama/acl_delete_response_test.go deleted file mode 100644 index 0d9dea68..00000000 --- a/vendor/github.com/Shopify/sarama/acl_delete_response_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - deleteAclsResponse = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 1, - 0, 0, // no error - 255, 255, // no error message - 0, 0, 0, 1, // 1 matching acl - 0, 0, // no error - 255, 255, // no error message - 2, // resource type - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', - 0, 4, 'h', 'o', 's', 't', - 4, - 3, - } -) - -func TestDeleteAclsResponse(t *testing.T) { - resp := &DeleteAclsResponse{ - ThrottleTime: 100 * time.Millisecond, - FilterResponses: []*FilterResponse{{ - MatchingAcls: []*MatchingAcl{{ - Resource: Resource{ResourceType: AclResourceTopic, ResourceName: "topic"}, - Acl: Acl{Principal: "principal", Host: "host", Operation: AclOperationWrite, PermissionType: AclPermissionAllow}, - }}, - }}, - } - - testResponse(t, "", resp, deleteAclsResponse) -} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go index 02a5a1f0..29841a5c 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -1,14 +1,19 @@ package sarama +//DescribeAclsRequest is a secribe acl request type type DescribeAclsRequest struct { + Version int AclFilter } func (d *DescribeAclsRequest) encode(pe packetEncoder) error { + d.AclFilter.Version = d.Version return d.AclFilter.encode(pe) } func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) + d.AclFilter.Version = 
int(version) return d.AclFilter.decode(pd, version) } @@ -17,9 +22,18 @@ func (d *DescribeAclsRequest) key() int16 { } func (d *DescribeAclsRequest) version() int16 { - return 0 + return int16(d.Version) +} + +func (d *DescribeAclsRequest) headerVersion() int16 { + return 1 } func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request_test.go b/vendor/github.com/Shopify/sarama/acl_describe_request_test.go deleted file mode 100644 index 3af14c61..00000000 --- a/vendor/github.com/Shopify/sarama/acl_describe_request_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - aclDescribeRequest = []byte{ - 2, // resource type - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', - 0, 4, 'h', 'o', 's', 't', - 5, // acl operation - 3, // acl permission type - } -) - -func TestAclDescribeRequest(t *testing.T) { - resourcename := "topic" - principal := "principal" - host := "host" - - req := &DescribeAclsRequest{ - AclFilter{ - ResourceType: AclResourceTopic, - ResourceName: &resourcename, - Principal: &principal, - Host: &host, - Operation: AclOperationCreate, - PermissionType: AclPermissionAllow, - }, - } - - testRequest(t, "", req, aclDescribeRequest) -} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go index 5bc9497f..c43408b2 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_response.go +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -2,7 +2,9 @@ package sarama import "time" +//DescribeAclsResponse is a describe acl response type type DescribeAclsResponse struct { + Version int16 ThrottleTime time.Duration Err KError ErrMsg *string @@ -22,7 +24,7 @@ func (d *DescribeAclsResponse) encode(pe packetEncoder) error { } for _, 
resourceAcl := range d.ResourceAcls { - if err := resourceAcl.encode(pe); err != nil { + if err := resourceAcl.encode(pe, d.Version); err != nil { return err } } @@ -72,9 +74,18 @@ func (d *DescribeAclsResponse) key() int16 { } func (d *DescribeAclsResponse) version() int16 { + return d.Version +} + +func (d *DescribeAclsResponse) headerVersion() int16 { return 0 } func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response_test.go b/vendor/github.com/Shopify/sarama/acl_describe_response_test.go deleted file mode 100644 index f0652cfe..00000000 --- a/vendor/github.com/Shopify/sarama/acl_describe_response_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var aclDescribeResponseError = []byte{ - 0, 0, 0, 100, - 0, 8, // error - 0, 5, 'e', 'r', 'r', 'o', 'r', - 0, 0, 0, 1, // 1 resource - 2, // cluster type - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 1, // 1 acl - 0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l', - 0, 4, 'h', 'o', 's', 't', - 4, // write - 3, // allow -} - -func TestAclDescribeResponse(t *testing.T) { - errmsg := "error" - resp := &DescribeAclsResponse{ - ThrottleTime: 100 * time.Millisecond, - Err: ErrBrokerNotAvailable, - ErrMsg: &errmsg, - ResourceAcls: []*ResourceAcls{{ - Resource: Resource{ - ResourceName: "topic", - ResourceType: AclResourceTopic, - }, - Acls: []*Acl{ - { - Principal: "principal", - Host: "host", - Operation: AclOperationWrite, - PermissionType: AclPermissionAllow, - }, - }, - }}, - } - - testResponse(t, "describe", resp, aclDescribeResponseError) -} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go index 97063542..fad55587 100644 --- a/vendor/github.com/Shopify/sarama/acl_filter.go +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -1,12 +1,14 @@ 
package sarama type AclFilter struct { - ResourceType AclResourceType - ResourceName *string - Principal *string - Host *string - Operation AclOperation - PermissionType AclPermissionType + Version int + ResourceType AclResourceType + ResourceName *string + ResourcePatternTypeFilter AclResourcePatternType + Principal *string + Host *string + Operation AclOperation + PermissionType AclPermissionType } func (a *AclFilter) encode(pe packetEncoder) error { @@ -14,6 +16,11 @@ func (a *AclFilter) encode(pe packetEncoder) error { if err := pe.putNullableString(a.ResourceName); err != nil { return err } + + if a.Version == 1 { + pe.putInt8(int8(a.ResourcePatternTypeFilter)) + } + if err := pe.putNullableString(a.Principal); err != nil { return err } @@ -37,6 +44,16 @@ func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { return err } + if a.Version == 1 { + pattern, err := pd.getInt8() + + if err != nil { + return err + } + + a.ResourcePatternTypeFilter = AclResourcePatternType(pattern) + } + if a.Principal, err = pd.getNullableString(); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go index 19da6f2f..c10ad7b9 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -1,42 +1,55 @@ package sarama -type AclOperation int +type ( + AclOperation int + + AclPermissionType int + + AclResourceType int + + AclResourcePatternType int +) // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java const ( - AclOperationUnknown AclOperation = 0 - AclOperationAny AclOperation = 1 - AclOperationAll AclOperation = 2 - AclOperationRead AclOperation = 3 - AclOperationWrite AclOperation = 4 - AclOperationCreate AclOperation = 5 - AclOperationDelete AclOperation = 6 - AclOperationAlter AclOperation = 7 - AclOperationDescribe AclOperation = 8 - AclOperationClusterAction 
AclOperation = 9 - AclOperationDescribeConfigs AclOperation = 10 - AclOperationAlterConfigs AclOperation = 11 - AclOperationIdempotentWrite AclOperation = 12 + AclOperationUnknown AclOperation = iota + AclOperationAny + AclOperationAll + AclOperationRead + AclOperationWrite + AclOperationCreate + AclOperationDelete + AclOperationAlter + AclOperationDescribe + AclOperationClusterAction + AclOperationDescribeConfigs + AclOperationAlterConfigs + AclOperationIdempotentWrite ) -type AclPermissionType int - // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java const ( - AclPermissionUnknown AclPermissionType = 0 - AclPermissionAny AclPermissionType = 1 - AclPermissionDeny AclPermissionType = 2 - AclPermissionAllow AclPermissionType = 3 + AclPermissionUnknown AclPermissionType = iota + AclPermissionAny + AclPermissionDeny + AclPermissionAllow ) -type AclResourceType int - // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java const ( - AclResourceUnknown AclResourceType = 0 - AclResourceAny AclResourceType = 1 - AclResourceTopic AclResourceType = 2 - AclResourceGroup AclResourceType = 3 - AclResourceCluster AclResourceType = 4 - AclResourceTransactionalID AclResourceType = 5 + AclResourceUnknown AclResourceType = iota + AclResourceAny + AclResourceTopic + AclResourceGroup + AclResourceCluster + AclResourceTransactionalID +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java +const ( + AclPatternUnknown AclResourcePatternType = iota + AclPatternAny + AclPatternMatch + AclPatternLiteral + AclPatternPrefixed ) diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go index 6da166c6..95586f9a 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go +++ 
b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -1,5 +1,6 @@ package sarama +//AddOffsetsToTxnRequest adds offsets to a transaction request type AddOffsetsToTxnRequest struct { TransactionalID string ProducerID int64 @@ -47,6 +48,10 @@ func (a *AddOffsetsToTxnRequest) version() int16 { return 0 } +func (a *AddOffsetsToTxnRequest) headerVersion() int16 { + return 1 +} + func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request_test.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request_test.go deleted file mode 100644 index e96b3d33..00000000 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package sarama - -import "testing" - -var ( - addOffsetsToTxnRequest = []byte{ - 0, 3, 't', 'x', 'n', - 0, 0, 0, 0, 0, 0, 31, 64, - 0, 0, - 0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd', - } -) - -func TestAddOffsetsToTxnRequest(t *testing.T) { - req := &AddOffsetsToTxnRequest{ - TransactionalID: "txn", - ProducerID: 8000, - ProducerEpoch: 0, - GroupID: "groupid", - } - - testRequest(t, "", req, addOffsetsToTxnRequest) -} diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go index 3a46151a..bdb18441 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -4,6 +4,7 @@ import ( "time" ) +//AddOffsetsToTxnResponse is a response type for adding offsets to txns type AddOffsetsToTxnResponse struct { ThrottleTime time.Duration Err KError @@ -39,6 +40,10 @@ func (a *AddOffsetsToTxnResponse) version() int16 { return 0 } +func (a *AddOffsetsToTxnResponse) headerVersion() int16 { + return 0 +} + func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git 
a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response_test.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response_test.go deleted file mode 100644 index 4504966f..00000000 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - addOffsetsToTxnResponse = []byte{ - 0, 0, 0, 100, - 0, 47, - } -) - -func TestAddOffsetsToTxnResponse(t *testing.T) { - resp := &AddOffsetsToTxnResponse{ - ThrottleTime: 100 * time.Millisecond, - Err: ErrInvalidProducerEpoch, - } - - testResponse(t, "", resp, addOffsetsToTxnResponse) -} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go index a8a59225..6289f451 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -1,5 +1,6 @@ package sarama +//AddPartitionsToTxnRequest is a add paartition request type AddPartitionsToTxnRequest struct { TransactionalID string ProducerID int64 @@ -71,6 +72,10 @@ func (a *AddPartitionsToTxnRequest) version() int16 { return 0 } +func (a *AddPartitionsToTxnRequest) headerVersion() int16 { + return 1 +} + func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request_test.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request_test.go deleted file mode 100644 index 76a2eee4..00000000 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package sarama - -import "testing" - -var ( - addPartitionsToTxnRequest = []byte{ - 0, 3, 't', 'x', 'n', - 0, 0, 0, 0, 0, 0, 31, 64, // ProducerID - 0, 0, 0, 0, // ProducerEpoch - 0, 1, // 1 topic - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 1, 0, 0, 0, 1, - } -) - -func 
TestAddPartitionsToTxnRequest(t *testing.T) { - req := &AddPartitionsToTxnRequest{ - TransactionalID: "txn", - ProducerID: 8000, - ProducerEpoch: 0, - TopicPartitions: map[string][]int32{ - "topic": []int32{1}, - }, - } - - testRequest(t, "", req, addPartitionsToTxnRequest) -} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go index 581c556c..73b73b07 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -4,6 +4,7 @@ import ( "time" ) +//AddPartitionsToTxnResponse is a partition errors to transaction type type AddPartitionsToTxnResponse struct { ThrottleTime time.Duration Errors map[string][]*PartitionError @@ -78,10 +79,15 @@ func (a *AddPartitionsToTxnResponse) version() int16 { return 0 } +func (a *AddPartitionsToTxnResponse) headerVersion() int16 { + return 0 +} + func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } +//PartitionError is a partition error type type PartitionError struct { Partition int32 Err KError diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response_test.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response_test.go deleted file mode 100644 index 7b5b82f8..00000000 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - addPartitionsToTxnResponse = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 1, // 1 partition error - 0, 0, 0, 2, // partition 2 - 0, 48, // error - } -) - -func TestAddPartitionsToTxnResponse(t *testing.T) { - resp := &AddPartitionsToTxnResponse{ - ThrottleTime: 100 * time.Millisecond, - Errors: map[string][]*PartitionError{ - "topic": []*PartitionError{&PartitionError{ - Err: ErrInvalidTxnState, - Partition: 
2, - }}, - }, - } - - testResponse(t, "", resp, addPartitionsToTxnResponse) -} diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index 52725758..9dea0255 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -1,6 +1,13 @@ package sarama -import "errors" +import ( + "errors" + "fmt" + "math/rand" + "strconv" + "sync" + "time" +) // ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics, // brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0. @@ -13,6 +20,12 @@ type ClusterAdmin interface { // may not return information about the new topic.The validateOnly option is supported from version 0.10.2.0. CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error + // List the topics available in the cluster with the default options. + ListTopics() (map[string]TopicDetail, error) + + // Describe some topics in the cluster. + DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) + // Delete a topic. It may take several seconds after the DeleteTopic to returns success // and for all the brokers to become aware that the topics are gone. // During this time, listTopics may continue to return information about the deleted topic. @@ -29,6 +42,14 @@ type ClusterAdmin interface { // new partitions. This operation is supported by brokers with version 1.0.0 or higher. CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error + // Alter the replica assignment for partitions. + // This operation is supported by brokers with version 2.4.0.0 or higher. + AlterPartitionReassignments(topic string, assignment [][]int32) error + + // Provides info on ongoing partitions replica reassignments. + // This operation is supported by brokers with version 2.4.0.0 or higher. 
+ ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) + // Delete records whose offset is smaller than the given offset of the corresponding partition. // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteRecords(topic string, partitionOffsets map[int32]int64) error @@ -65,6 +86,24 @@ type ClusterAdmin interface { // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) + // List the consumer groups available in the cluster. + ListConsumerGroups() (map[string]string, error) + + // Describe the given consumer groups. + DescribeConsumerGroups(groups []string) ([]*GroupDescription, error) + + // List the consumer group offsets available in the cluster. + ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) + + // Delete a consumer group. + DeleteConsumerGroup(group string) error + + // Get information about the nodes in the cluster + DescribeCluster() (brokers []*Broker, controllerID int32, err error) + + // Get information about all log directories on the given set of brokers + DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error) + // Close shuts down the admin and closes underlying client. Close() error } @@ -80,9 +119,14 @@ func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { if err != nil { return nil, err } + return NewClusterAdminFromClient(client) +} +// NewClusterAdminFromClient creates a new ClusterAdmin using the given client. +// Note that underlying client will also be closed on admin's Close() call. 
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) { //make sure we can retrieve the controller - _, err = client.Controller() + _, err := client.Controller() if err != nil { return nil, err } @@ -102,14 +146,51 @@ func (ca *clusterAdmin) Controller() (*Broker, error) { return ca.client.Controller() } -func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { +func (ca *clusterAdmin) refreshController() (*Broker, error) { + return ca.client.RefreshController() +} + +// isErrNoController returns `true` if the given error type unwraps to an +// `ErrNotController` response from Kafka +func isErrNoController(err error) bool { + switch e := err.(type) { + case *TopicError: + return e.Err == ErrNotController + case *TopicPartitionError: + return e.Err == ErrNotController + case KError: + return e == ErrNotController + } + return false +} + +// retryOnError will repeatedly call the given (error-returning) func in the +// case that its response is non-nil and retriable (as determined by the +// provided retriable func) up to the maximum number of tries permitted by +// the admin client configuration +func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error { + var err error + for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { + err = fn() + if err == nil || !retriable(err) { + return err + } + Logger.Printf( + "admin/request retrying after %dms... 
(%d attempts remaining)\n", + ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + time.Sleep(ca.conf.Admin.Retry.Backoff) + continue + } + return err +} +func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { if topic == "" { return ErrInvalidTopic } if detail == nil { - return errors.New("You must specify topic details") + return errors.New("you must specify topic details") } topicDetails := make(map[string]*TopicDetail) @@ -128,30 +209,180 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO request.Version = 2 } - b, err := ca.Controller() + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreateTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { + controller, err := ca.Controller() if err != nil { - return err + return nil, err } - rsp, err := b.CreateTopics(request) + request := &MetadataRequest{ + Topics: topics, + AllowAutoTopicCreation: false, + } + + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 5 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + } + + response, err := controller.GetMetadata(request) if err != nil { - return err + return nil, err } + return response.Topics, nil +} - topicErr, ok := rsp.TopicErrors[topic] - if !ok { - return ErrIncompleteResponse +func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, int32(0), err } - if topicErr.Err != ErrNoError { - return 
topicErr.Err + request := &MetadataRequest{ + Topics: []string{}, } - return nil + if ca.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 1 + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, int32(0), err + } + + return response.Brokers, response.ControllerID, nil } -func (ca *clusterAdmin) DeleteTopic(topic string) error { +func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) { + brokers := ca.client.Brokers() + for _, b := range brokers { + if b.ID() == id { + return b, nil + } + } + return nil, fmt.Errorf("could not find broker id %d", id) +} +func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { + brokers := ca.client.Brokers() + if len(brokers) > 0 { + index := rand.Intn(len(brokers)) + return brokers[index], nil + } + return nil, errors.New("no available broker") +} + +func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { + // In order to build TopicDetails we need to first get the list of all + // topics using a MetadataRequest and then get their configs using a + // DescribeConfigsRequest request. To avoid sending many requests to the + // broker, we use a single DescribeConfigsRequest. 
+ + // Send the all-topic MetadataRequest + b, err := ca.findAnyBroker() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + metadataReq := &MetadataRequest{} + metadataResp, err := b.GetMetadata(metadataReq) + if err != nil { + return nil, err + } + + topicsDetailsMap := make(map[string]TopicDetail) + + var describeConfigsResources []*ConfigResource + + for _, topic := range metadataResp.Topics { + topicDetails := TopicDetail{ + NumPartitions: int32(len(topic.Partitions)), + } + if len(topic.Partitions) > 0 { + topicDetails.ReplicaAssignment = map[int32][]int32{} + for _, partition := range topic.Partitions { + topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas + } + topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas)) + } + topicsDetailsMap[topic.Name] = topicDetails + + // we populate the resources we want to describe from the MetadataResponse + topicResource := ConfigResource{ + Type: TopicResource, + Name: topic.Name, + } + describeConfigsResources = append(describeConfigsResources, &topicResource) + } + + // Send the DescribeConfigsRequest + describeConfigsReq := &DescribeConfigsRequest{ + Resources: describeConfigsResources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + describeConfigsReq.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + describeConfigsReq.Version = 2 + } + + describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) + if err != nil { + return nil, err + } + + for _, resource := range describeConfigsResp.Resources { + topicDetails := topicsDetailsMap[resource.Name] + topicDetails.ConfigEntries = make(map[string]*string) + + for _, entry := range resource.Configs { + // only include non-default non-sensitive config + // (don't actually think topic config will ever be sensitive) + if entry.Default || entry.Sensitive { + continue + } + topicDetails.ConfigEntries[entry.Name] = &entry.Value + } + + topicsDetailsMap[resource.Name] = topicDetails + } + + return 
topicsDetailsMap, nil +} + +func (ca *clusterAdmin) DeleteTopic(topic string) error { if topic == "" { return ErrInvalidTopic } @@ -165,25 +396,31 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { request.Version = 1 } - b, err := ca.Controller() - if err != nil { - return err - } + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } - rsp, err := b.DeleteTopics(request) - if err != nil { - return err - } + rsp, err := b.DeleteTopics(request) + if err != nil { + return err + } - topicErr, ok := rsp.TopicErrorCodes[topic] - if !ok { - return ErrIncompleteResponse - } + topicErr, ok := rsp.TopicErrorCodes[topic] + if !ok { + return ErrIncompleteResponse + } - if topicErr != ErrNoError { - return topicErr - } - return nil + if topicErr != ErrNoError { + if topicErr == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) } func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error { @@ -199,63 +436,170 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ Timeout: ca.conf.Admin.Timeout, } - b, err := ca.Controller() - if err != nil { - return err - } + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } - rsp, err := b.CreatePartitions(request) - if err != nil { - return err + rsp, err := b.CreatePartitions(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicPartitionErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { + if topic == "" { + return ErrInvalidTopic } - topicErr, ok := rsp.TopicPartitionErrors[topic] - if 
!ok { - return ErrIncompleteResponse + request := &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), } - if topicErr.Err != ErrNoError { - return topicErr.Err + for i := 0; i < len(assignment); i++ { + request.AddBlock(topic, int32(i), assignment[i]) } - return nil -} + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } -func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { + errs := make([]error, 0) + + rsp, err := b.AlterPartitionReassignments(request) + + if err != nil { + errs = append(errs, err) + } else { + if rsp.ErrorCode > 0 { + errs = append(errs, errors.New(rsp.ErrorCode.Error())) + } + + for topic, topicErrors := range rsp.Errors { + for partition, partitionError := range topicErrors { + if partitionError.errorCode != ErrNoError { + errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) + errs = append(errs, errors.New(errStr)) + } + } + } + } + + if len(errs) > 0 { + return ErrReassignPartitions{MultiError{&errs}} + } + + return nil + }) +} +func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { if topic == "" { - return ErrInvalidTopic + return nil, ErrInvalidTopic } - topics := make(map[string]*DeleteRecordsRequestTopic) - topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets} - request := &DeleteRecordsRequest{ - Topics: topics, - Timeout: ca.conf.Admin.Timeout, + request := &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), } + request.AddBlock(topic, partitions) + b, err := ca.Controller() if err != nil { - return err + return nil, err } + _ = b.Open(ca.client.Config()) - rsp, err := b.DeleteRecords(request) - if err != nil { - return err + rsp, err := b.ListPartitionReassignments(request) + + if err == 
nil && rsp != nil { + return rsp.TopicStatus, nil + } else { + return nil, err } +} - _, ok := rsp.Topics[topic] - if !ok { - return ErrIncompleteResponse +func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { + if topic == "" { + return ErrInvalidTopic + } + partitionPerBroker := make(map[*Broker][]int32) + for partition := range partitionOffsets { + broker, err := ca.client.Leader(topic, partition) + if err != nil { + return err + } + if _, ok := partitionPerBroker[broker]; ok { + partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) + } else { + partitionPerBroker[broker] = []int32{partition} + } } + errs := make([]error, 0) + for broker, partitions := range partitionPerBroker { + topics := make(map[string]*DeleteRecordsRequestTopic) + recordsToDelete := make(map[int32]int64) + for _, p := range partitions { + recordsToDelete[p] = partitionOffsets[p] + } + topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete} + request := &DeleteRecordsRequest{ + Topics: topics, + Timeout: ca.conf.Admin.Timeout, + } + rsp, err := broker.DeleteRecords(request) + if err != nil { + errs = append(errs, err) + } else { + deleteRecordsResponseTopic, ok := rsp.Topics[topic] + if !ok { + errs = append(errs, ErrIncompleteResponse) + } else { + for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { + if deleteRecordsResponsePartition.Err != ErrNoError { + errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error())) + } + } + } + } + } + if len(errs) > 0 { + return ErrDeleteRecords{MultiError{&errs}} + } //todo since we are dealing with couple of partitions it would be good if we return slice of errors //for each partition instead of one error return nil } -func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { +// Returns a bool indicating whether the resource request needs to go to a +// specific broker +func 
dependsOnSpecificNode(resource ConfigResource) bool { + return (resource.Type == BrokerResource && resource.Name != "") || + resource.Type == BrokerLoggerResource +} +func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { var entries []ConfigEntry var resources []*ConfigResource resources = append(resources, &resource) @@ -264,11 +608,31 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, Resources: resources, } - b, err := ca.Controller() + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } + + var ( + b *Broker + err error + ) + + // DescribeConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(resource) { + id, _ := strconv.Atoi(resource.Name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } if err != nil { return nil, err } + _ = b.Open(ca.client.Config()) rsp, err := b.DescribeConfigs(request) if err != nil { return nil, err @@ -279,6 +643,9 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, if rspResource.ErrorMsg != "" { return nil, errors.New(rspResource.ErrorMsg) } + if rspResource.ErrorCode != 0 { + return nil, KError(rspResource.ErrorCode) + } for _, cfgEntry := range rspResource.Configs { entries = append(entries, *cfgEntry) } @@ -288,7 +655,6 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, } func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error { - var resources []*AlterConfigsResource resources = append(resources, &AlterConfigsResource{ Type: resourceType, @@ -301,11 +667,23 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string ValidateOnly: validateOnly, } - b, err := ca.Controller() + var ( + b *Broker + err error + ) + + // AlterConfig of 
broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + id, _ := strconv.Atoi(name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } if err != nil { return err } + _ = b.Open(ca.client.Config()) rsp, err := b.AlterConfigs(request) if err != nil { return err @@ -316,6 +694,9 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string if rspResource.ErrorMsg != "" { return errors.New(rspResource.ErrorMsg) } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } } } return nil @@ -326,6 +707,10 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { acls = append(acls, &AclCreation{resource, acl}) request := &CreateAclsRequest{AclCreations: acls} + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + b, err := ca.Controller() if err != nil { return err @@ -336,9 +721,12 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { } func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { - request := &DescribeAclsRequest{AclFilter: filter} + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + b, err := ca.Controller() if err != nil { return nil, err @@ -361,6 +749,10 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi filters = append(filters, &filter) request := &DeleteAclsRequest{Filters: filters} + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + b, err := ca.Controller() if err != nil { return nil, err @@ -376,7 +768,167 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi for _, mACL := range fr.MatchingAcls { mAcls = append(mAcls, *mACL) } - } return mAcls, nil } + +func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { + groupsPerBroker := make(map[*Broker][]string) + + for _, group := range 
groups { + controller, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + } + + for broker, brokerGroups := range groupsPerBroker { + response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + Groups: brokerGroups, + }) + if err != nil { + return nil, err + } + + result = append(result, response.Groups...) + } + return result, nil +} + +func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) { + allGroups = make(map[string]string) + + // Query brokers in parallel, since we have to query *all* brokers + brokers := ca.client.Brokers() + groupMaps := make(chan map[string]string, len(brokers)) + errChan := make(chan error, len(brokers)) + wg := sync.WaitGroup{} + + for _, b := range brokers { + wg.Add(1) + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.ListGroups(&ListGroupsRequest{}) + if err != nil { + errChan <- err + return + } + + groups := make(map[string]string) + for group, typ := range response.Groups { + groups[group] = typ + } + + groupMaps <- groups + }(b, ca.conf) + } + + wg.Wait() + close(groupMaps) + close(errChan) + + for groupMap := range groupMaps { + for group, protocolType := range groupMap { + allGroups[group] = protocolType + } + } + + // Intentionally return only the first error for simplicity + err = <-errChan + return +} + +func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: topicPartitions, + } + + if ca.conf.Version.IsAtLeast(V0_10_2_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_8_2_2) { + request.Version = 1 + } + + return coordinator.FetchOffset(request) +} + +func 
(ca *clusterAdmin) DeleteConsumerGroup(group string) error { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + request := &DeleteGroupsRequest{ + Groups: []string{group}, + } + + resp, err := coordinator.DeleteGroups(request) + if err != nil { + return err + } + + groupErr, ok := resp.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if groupErr != ErrNoError { + return groupErr + } + + return nil +} + +func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { + allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata) + + // Query brokers in parallel, since we may have to query multiple brokers + logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds)) + errChan := make(chan error, len(brokerIds)) + wg := sync.WaitGroup{} + + for _, b := range brokerIds { + wg.Add(1) + broker, err := ca.findBroker(b) + if err != nil { + Logger.Printf("Unable to find broker with ID = %v\n", b) + continue + } + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{}) + if err != nil { + errChan <- err + return + } + logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata) + logDirs[b.ID()] = response.LogDirs + logDirsMaps <- logDirs + }(broker, ca.conf) + } + + wg.Wait() + close(logDirsMaps) + close(errChan) + + for logDirsMap := range logDirsMaps { + for id, logDirs := range logDirsMap { + allLogDirs[id] = logDirs + } + } + + // Intentionally return only the first error for simplicity + err = <-errChan + return +} diff --git a/vendor/github.com/Shopify/sarama/admin_test.go b/vendor/github.com/Shopify/sarama/admin_test.go deleted file mode 100644 index 9d3cc317..00000000 --- a/vendor/github.com/Shopify/sarama/admin_test.go +++ /dev/null @@ -1,501 +0,0 @@ -package sarama - -import ( - 
"errors" - "testing" -) - -func TestClusterAdmin(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminInvalidController(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - _, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err == nil { - t.Fatal(errors.New("controller not set still cluster admin was created")) - } - - if err != ErrControllerNotAvailable { - t.Fatal(err) - } -} - -func TestClusterAdminCreateTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "CreateTopicsRequest": NewMockCreateTopicsResponse(t), - }) - - config := NewConfig() - config.Version = V0_10_2_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - err = admin.CreateTopic("my_topic", &TopicDetail{NumPartitions: 1, ReplicationFactor: 1}, false) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminCreateTopicWithInvalidTopicDetail(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "CreateTopicsRequest": NewMockCreateTopicsResponse(t), - }) - - config := NewConfig() - config.Version = V0_10_2_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.CreateTopic("my_topic", nil, false) - if err.Error() != "You must specify topic details" { - t.Fatal(err) - } - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminCreateTopicWithDiffVersion(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "CreateTopicsRequest": NewMockCreateTopicsResponse(t), - }) - - config := NewConfig() - config.Version = V0_11_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.CreateTopic("my_topic", &TopicDetail{NumPartitions: 1, ReplicationFactor: 1}, false) - if err != ErrInsufficientData { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminDeleteTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DeleteTopicsRequest": NewMockDeleteTopicsResponse(t), - }) - - config := NewConfig() - config.Version = V0_10_2_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.DeleteTopic("my_topic") - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminDeleteEmptyTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DeleteTopicsRequest": NewMockDeleteTopicsResponse(t), - }) - - config := NewConfig() - config.Version = V0_10_2_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.DeleteTopic("") - if err != ErrInvalidTopic { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminCreatePartitions(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "CreatePartitionsRequest": NewMockCreatePartitionsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.CreatePartitions("my_topic", 3, nil, false) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminCreatePartitionsWithDiffVersion(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "CreatePartitionsRequest": NewMockCreatePartitionsResponse(t), - }) - - config := NewConfig() - config.Version = V0_10_2_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = admin.CreatePartitions("my_topic", 3, nil, false) - if err != ErrUnsupportedVersion { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminDeleteRecords(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DeleteRecordsRequest": NewMockDeleteRecordsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - partitionOffset := make(map[int32]int64) - partitionOffset[1] = 1000 - partitionOffset[2] = 1000 - partitionOffset[3] = 1000 - - err = admin.DeleteRecords("my_topic", partitionOffset) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminDeleteRecordsWithDiffVersion(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DeleteRecordsRequest": NewMockDeleteRecordsResponse(t), - }) - - config := NewConfig() - config.Version = V0_10_2_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - partitionOffset := make(map[int32]int64) - partitionOffset[1] = 1000 - partitionOffset[2] = 1000 - partitionOffset[3] = 1000 - - err = admin.DeleteRecords("my_topic", partitionOffset) - if err != ErrUnsupportedVersion { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminDescribeConfig(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DescribeConfigsRequest": NewMockDescribeConfigsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - resource := ConfigResource{Name: "r1", Type: TopicResource, ConfigNames: []string{"my_topic"}} - entries, err := admin.DescribeConfig(resource) - if err != nil { - t.Fatal(err) - } - - if len(entries) <= 0 { - t.Fatal(errors.New("no resource present")) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminAlterConfig(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "AlterConfigsRequest": NewMockAlterConfigsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - var value string - entries := make(map[string]*string) - value = "3" - entries["ReplicationFactor"] = &value - err = admin.AlterConfig(TopicResource, "my_topic", entries, false) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminCreateAcl(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "CreateAclsRequest": NewMockCreateAclsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - r := Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"} - a := Acl{Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny} - - err = admin.CreateACL(r, a) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminListAcls(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DescribeAclsRequest": NewMockListAclsResponse(t), - "CreateAclsRequest": NewMockCreateAclsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - r := Resource{ResourceType: AclResourceTopic, ResourceName: "my_topic"} - a := Acl{Host: "localhost", Operation: AclOperationAlter, PermissionType: AclPermissionAny} - - err = admin.CreateACL(r, a) - if err != nil { - t.Fatal(err) - } - resourceName := "my_topic" - filter := AclFilter{ - ResourceType: AclResourceTopic, - Operation: AclOperationRead, - ResourceName: &resourceName, - } - - rAcls, err := admin.ListAcls(filter) - if err != nil { - t.Fatal(err) - } - if len(rAcls) <= 0 { - t.Fatal("no acls present") - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestClusterAdminDeleteAcl(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(seedBroker.BrokerID()). 
- SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), - "DeleteAclsRequest": NewMockDeleteAclsResponse(t), - }) - - config := NewConfig() - config.Version = V1_0_0_0 - admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - resourceName := "my_topic" - filter := AclFilter{ - ResourceType: AclResourceTopic, - Operation: AclOperationAlter, - ResourceName: &resourceName, - } - - _, err = admin.DeleteACL(filter, false) - if err != nil { - t.Fatal(err) - } - - err = admin.Close() - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go index 48c44ead..c88bb604 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_request.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -1,45 +1,47 @@ package sarama +//AlterConfigsRequest is an alter config request type type AlterConfigsRequest struct { Resources []*AlterConfigsResource ValidateOnly bool } +//AlterConfigsResource is an alter config resource type type AlterConfigsResource struct { Type ConfigResourceType Name string ConfigEntries map[string]*string } -func (acr *AlterConfigsRequest) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(acr.Resources)); err != nil { +func (a *AlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } - for _, r := range acr.Resources { + for _, r := range a.Resources { if err := r.encode(pe); err != nil { return err } } - pe.putBool(acr.ValidateOnly) + pe.putBool(a.ValidateOnly) return nil } -func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { +func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { resourceCount, err := pd.getArrayLength() if err != nil { return err } - acr.Resources = make([]*AlterConfigsResource, resourceCount) - for i := range acr.Resources { + 
a.Resources = make([]*AlterConfigsResource, resourceCount) + for i := range a.Resources { r := &AlterConfigsResource{} err = r.decode(pd, version) if err != nil { return err } - acr.Resources[i] = r + a.Resources[i] = r } validateOnly, err := pd.getBool() @@ -47,22 +49,22 @@ func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { return err } - acr.ValidateOnly = validateOnly + a.ValidateOnly = validateOnly return nil } -func (ac *AlterConfigsResource) encode(pe packetEncoder) error { - pe.putInt8(int8(ac.Type)) +func (a *AlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Type)) - if err := pe.putString(ac.Name); err != nil { + if err := pe.putString(a.Name); err != nil { return err } - if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil { + if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { return err } - for configKey, configValue := range ac.ConfigEntries { + for configKey, configValue := range a.ConfigEntries { if err := pe.putString(configKey); err != nil { return err } @@ -74,18 +76,18 @@ func (ac *AlterConfigsResource) encode(pe packetEncoder) error { return nil } -func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { +func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error { t, err := pd.getInt8() if err != nil { return err } - ac.Type = ConfigResourceType(t) + a.Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } - ac.Name = name + a.Name = name n, err := pd.getArrayLength() if err != nil { @@ -93,13 +95,13 @@ func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { } if n > 0 { - ac.ConfigEntries = make(map[string]*string, n) + a.ConfigEntries = make(map[string]*string, n) for i := 0; i < n; i++ { configKey, err := pd.getString() if err != nil { return err } - if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + if a.ConfigEntries[configKey], err = 
pd.getNullableString(); err != nil { return err } } @@ -107,14 +109,18 @@ func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { return err } -func (acr *AlterConfigsRequest) key() int16 { +func (a *AlterConfigsRequest) key() int16 { return 33 } -func (acr *AlterConfigsRequest) version() int16 { +func (a *AlterConfigsRequest) version() int16 { return 0 } -func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion { +func (a *AlterConfigsRequest) headerVersion() int16 { + return 1 +} + +func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request_test.go b/vendor/github.com/Shopify/sarama/alter_configs_request_test.go deleted file mode 100644 index b9407ca7..00000000 --- a/vendor/github.com/Shopify/sarama/alter_configs_request_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyAlterConfigsRequest = []byte{ - 0, 0, 0, 0, // 0 configs - 0, // don't Validate - } - - singleAlterConfigsRequest = []byte{ - 0, 0, 0, 1, // 1 config - 2, // a topic - 0, 3, 'f', 'o', 'o', // topic name: foo - 0, 0, 0, 1, //1 config name - 0, 10, // 10 chars - 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', - 0, 4, - '1', '0', '0', '0', - 0, // don't validate - } - - doubleAlterConfigsRequest = []byte{ - 0, 0, 0, 2, // 2 config - 2, // a topic - 0, 3, 'f', 'o', 'o', // topic name: foo - 0, 0, 0, 1, //1 config name - 0, 10, // 10 chars - 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', - 0, 4, - '1', '0', '0', '0', - 2, // a topic - 0, 3, 'b', 'a', 'r', // topic name: foo - 0, 0, 0, 1, //2 config - 0, 12, // 12 chars - 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's', - 0, 4, - '1', '0', '0', '0', - 0, // don't validate - } -) - -func TestAlterConfigsRequest(t *testing.T) { - var request *AlterConfigsRequest - - request = &AlterConfigsRequest{ - Resources: []*AlterConfigsResource{}, - } - testRequest(t, "no requests", request, 
emptyAlterConfigsRequest) - - configValue := "1000" - request = &AlterConfigsRequest{ - Resources: []*AlterConfigsResource{ - &AlterConfigsResource{ - Type: TopicResource, - Name: "foo", - ConfigEntries: map[string]*string{ - "segment.ms": &configValue, - }, - }, - }, - } - - testRequest(t, "one config", request, singleAlterConfigsRequest) - - request = &AlterConfigsRequest{ - Resources: []*AlterConfigsResource{ - &AlterConfigsResource{ - Type: TopicResource, - Name: "foo", - ConfigEntries: map[string]*string{ - "segment.ms": &configValue, - }, - }, - &AlterConfigsResource{ - Type: TopicResource, - Name: "bar", - ConfigEntries: map[string]*string{ - "retention.ms": &configValue, - }, - }, - }, - } - - testRequest(t, "two configs", request, doubleAlterConfigsRequest) -} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go index 29b09e1f..3266f927 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -2,11 +2,13 @@ package sarama import "time" +//AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } +//AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 ErrorMsg string @@ -14,21 +16,21 @@ type AlterConfigsResourceResponse struct { Name string } -func (ct *AlterConfigsResponse) encode(pe packetEncoder) error { - pe.putInt32(int32(ct.ThrottleTime / time.Millisecond)) +func (a *AlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) - if err := pe.putArrayLength(len(ct.Resources)); err != nil { + if err := pe.putArrayLength(len(a.Resources)); err != nil { return err } - for i := range ct.Resources { - pe.putInt16(ct.Resources[i].ErrorCode) - err := 
pe.putString(ct.Resources[i].ErrorMsg) + for i := range a.Resources { + pe.putInt16(a.Resources[i].ErrorCode) + err := pe.putString(a.Resources[i].ErrorMsg) if err != nil { return nil } - pe.putInt8(int8(ct.Resources[i].Type)) - err = pe.putString(ct.Resources[i].Name) + pe.putInt8(int8(a.Resources[i].Type)) + err = pe.putString(a.Resources[i].Name) if err != nil { return nil } @@ -37,59 +39,63 @@ func (ct *AlterConfigsResponse) encode(pe packetEncoder) error { return nil } -func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { +func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { throttleTime, err := pd.getInt32() if err != nil { return err } - acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond responseCount, err := pd.getArrayLength() if err != nil { return err } - acr.Resources = make([]*AlterConfigsResourceResponse, responseCount) + a.Resources = make([]*AlterConfigsResourceResponse, responseCount) - for i := range acr.Resources { - acr.Resources[i] = new(AlterConfigsResourceResponse) + for i := range a.Resources { + a.Resources[i] = new(AlterConfigsResourceResponse) errCode, err := pd.getInt16() if err != nil { return err } - acr.Resources[i].ErrorCode = errCode + a.Resources[i].ErrorCode = errCode e, err := pd.getString() if err != nil { return err } - acr.Resources[i].ErrorMsg = e + a.Resources[i].ErrorMsg = e t, err := pd.getInt8() if err != nil { return err } - acr.Resources[i].Type = ConfigResourceType(t) + a.Resources[i].Type = ConfigResourceType(t) name, err := pd.getString() if err != nil { return err } - acr.Resources[i].Name = name + a.Resources[i].Name = name } return nil } -func (r *AlterConfigsResponse) key() int16 { +func (a *AlterConfigsResponse) key() int16 { return 32 } -func (r *AlterConfigsResponse) version() int16 { +func (a *AlterConfigsResponse) version() int16 { return 0 } -func (r *AlterConfigsResponse) 
requiredVersion() KafkaVersion { +func (a *AlterConfigsResponse) headerVersion() int16 { + return 0 +} + +func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response_test.go b/vendor/github.com/Shopify/sarama/alter_configs_response_test.go deleted file mode 100644 index 45920287..00000000 --- a/vendor/github.com/Shopify/sarama/alter_configs_response_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - alterResponseEmpty = []byte{ - 0, 0, 0, 0, //throttle - 0, 0, 0, 0, // no configs - } - - alterResponsePopulated = []byte{ - 0, 0, 0, 0, //throttle - 0, 0, 0, 1, // response - 0, 0, //errorcode - 0, 0, //string - 2, // topic - 0, 3, 'f', 'o', 'o', - } -) - -func TestAlterConfigsResponse(t *testing.T) { - var response *AlterConfigsResponse - - response = &AlterConfigsResponse{ - Resources: []*AlterConfigsResourceResponse{}, - } - testVersionDecodable(t, "empty", response, alterResponseEmpty, 0) - if len(response.Resources) != 0 { - t.Error("Expected no groups") - } - - response = &AlterConfigsResponse{ - Resources: []*AlterConfigsResourceResponse{ - &AlterConfigsResourceResponse{ - ErrorCode: 0, - ErrorMsg: "", - Type: TopicResource, - Name: "foo", - }, - }, - } - testResponse(t, "response with error", response, alterResponsePopulated) -} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go new file mode 100644 index 00000000..f0a2f9dd --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go @@ -0,0 +1,130 @@ +package sarama + +type alterPartitionReassignmentsBlock struct { + replicas []int32 +} + +func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error { + if err := pe.putNullableCompactInt32Array(b.replicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + 
return nil +} + +func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) { + if b.replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + return nil +} + +type AlterPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string]map[int32]*alterPartitionReassignmentsBlock + Version int16 +} + +func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != 
nil { + return err + } + + return +} + +func (r *AlterPartitionReassignmentsRequest) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + } + + r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go new file mode 100644 index 00000000..b3f9a15f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go @@ -0,0 +1,157 @@ +package sarama + +type alterPartitionReassignmentsErrorBlock struct { + errorCode KError + errorMessage *string +} + +func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { + pe.putInt16(int16(b.errorCode)) + if err := pe.putNullableCompactString(b.errorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { + errorCode, err := pd.getInt16() + if err != nil { + return err + } + b.errorCode = KError(errorCode) + b.errorMessage, err = pd.getCompactNullableString() + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return err +} + +type AlterPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + Errors 
map[string]map[int32]*alterPartitionReassignmentsErrorBlock +} + +func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) + r.Errors[topic] = partitions + } + + partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} +} + +func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Errors)) + for topic, partitions := range r.Errors { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != 
nil { + return err + } + + r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsErrorBlock{} + if err := block.decode(pd); err != nil { + return err + } + + r.Errors[topic][partition] = block + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *AlterPartitionReassignmentsResponse) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go index ab65f01c..d67c5e1e 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -1,24 +1,29 @@ package sarama +//ApiVersionsRequest ... 
type ApiVersionsRequest struct { } -func (r *ApiVersionsRequest) encode(pe packetEncoder) error { +func (a *ApiVersionsRequest) encode(pe packetEncoder) error { return nil } -func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { +func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { return nil } -func (r *ApiVersionsRequest) key() int16 { +func (a *ApiVersionsRequest) key() int16 { return 18 } -func (r *ApiVersionsRequest) version() int16 { +func (a *ApiVersionsRequest) version() int16 { return 0 } -func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { +func (a *ApiVersionsRequest) headerVersion() int16 { + return 1 +} + +func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/api_versions_request_test.go b/vendor/github.com/Shopify/sarama/api_versions_request_test.go deleted file mode 100644 index 5ab4fa71..00000000 --- a/vendor/github.com/Shopify/sarama/api_versions_request_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package sarama - -import "testing" - -var ( - apiVersionRequest = []byte{} -) - -func TestApiVersionsRequest(t *testing.T) { - var request *ApiVersionsRequest - - request = new(ApiVersionsRequest) - testRequest(t, "basic", request, apiVersionRequest) -} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go index 23bc326e..d09e8d9e 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -1,5 +1,6 @@ package sarama +//ApiVersionsResponseBlock is an api version response block type type ApiVersionsResponseBlock struct { ApiKey int16 MinVersion int16 @@ -31,6 +32,7 @@ func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { return nil } +//ApiVersionsResponse is an api version response type type ApiVersionsResponse struct { Err KError ApiVersions 
[]*ApiVersionsResponseBlock @@ -82,6 +84,10 @@ func (r *ApiVersionsResponse) version() int16 { return 0 } +func (a *ApiVersionsResponse) headerVersion() int16 { + return 0 +} + func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response_test.go b/vendor/github.com/Shopify/sarama/api_versions_response_test.go deleted file mode 100644 index 675a65a7..00000000 --- a/vendor/github.com/Shopify/sarama/api_versions_response_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package sarama - -import "testing" - -var ( - apiVersionResponse = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, - 0x00, 0x02, - 0x00, 0x01, - } -) - -func TestApiVersionsResponse(t *testing.T) { - var response *ApiVersionsResponse - - response = new(ApiVersionsResponse) - testVersionDecodable(t, "no error", response, apiVersionResponse, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } - if response.ApiVersions[0].ApiKey != 0x03 { - t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey) - } - if response.ApiVersions[0].MinVersion != 0x02 { - t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion) - } - if response.ApiVersions[0].MaxVersion != 0x01 { - t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion) - } -} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index 89722554..209fd2d3 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -47,18 +47,78 @@ type AsyncProducer interface { Errors() <-chan *ProducerError } +// transactionManager keeps the state necessary to ensure idempotent production +type transactionManager struct { + producerID int64 + producerEpoch int16 + sequenceNumbers map[string]int32 + mutex 
sync.Mutex +} + +const ( + noProducerID = -1 + noProducerEpoch = -1 +) + +func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { + key := fmt.Sprintf("%s-%d", topic, partition) + t.mutex.Lock() + defer t.mutex.Unlock() + sequence := t.sequenceNumbers[key] + t.sequenceNumbers[key] = sequence + 1 + return sequence, t.producerEpoch +} + +func (t *transactionManager) bumpEpoch() { + t.mutex.Lock() + defer t.mutex.Unlock() + t.producerEpoch++ + for k := range t.sequenceNumbers { + t.sequenceNumbers[k] = 0 + } +} + +func (t *transactionManager) getProducerID() (int64, int16) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.producerID, t.producerEpoch +} + +func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { + txnmgr := &transactionManager{ + producerID: noProducerID, + producerEpoch: noProducerEpoch, + } + + if conf.Producer.Idempotent { + initProducerIDResponse, err := client.InitProducerID() + if err != nil { + return nil, err + } + txnmgr.producerID = initProducerIDResponse.ProducerID + txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch + txnmgr.sequenceNumbers = make(map[string]int32) + txnmgr.mutex = sync.Mutex{} + + Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch) + } + + return txnmgr, nil +} + type asyncProducer struct { - client Client - conf *Config - ownClient bool + client Client + conf *Config errors chan *ProducerError input, successes, retries chan *ProducerMessage inFlight sync.WaitGroup - brokers map[*Broker]chan<- *ProducerMessage - brokerRefs map[chan<- *ProducerMessage]int + brokers map[*Broker]*brokerProducer + brokerRefs map[*brokerProducer]int brokerLock sync.Mutex + + txnmgr *transactionManager } // NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. 
@@ -67,23 +127,29 @@ func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { if err != nil { return nil, err } - - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - p.(*asyncProducer).ownClient = true - return p, nil + return newAsyncProducer(client) } // NewAsyncProducerFromClient creates a new Producer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newAsyncProducer(cli) +} + +func newAsyncProducer(client Client) (AsyncProducer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } + txnmgr, err := newTransactionManager(client.Config(), client) + if err != nil { + return nil, err + } + p := &asyncProducer{ client: client, conf: client.Config(), @@ -91,8 +157,9 @@ func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { input: make(chan *ProducerMessage), successes: make(chan *ProducerMessage), retries: make(chan *ProducerMessage), - brokers: make(map[*Broker]chan<- *ProducerMessage), - brokerRefs: make(map[chan<- *ProducerMessage]int), + brokers: make(map[*Broker]*brokerProducer), + brokerRefs: make(map[*brokerProducer]int), + txnmgr: txnmgr, } // launch our singleton dispatchers @@ -139,15 +206,25 @@ type ProducerMessage struct { // Partition is the partition that the message was sent to. This is only // guaranteed to be defined if the message was successfully delivered. Partition int32 - // Timestamp is the timestamp assigned to the message by the broker. 
This - // is only guaranteed to be defined if the message was successfully - // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at - // least version 0.10.0. + // Timestamp can vary in behaviour depending on broker configuration, being + // in either one of the CreateTime or LogAppendTime modes (default CreateTime), + // and requiring version at least 0.10.0. + // + // When configured to CreateTime, the timestamp is specified by the producer + // either by explicitly setting this field, or when the message is added + // to a produce set. + // + // When configured to LogAppendTime, the timestamp assigned to the message + // by the broker. This is only guaranteed to be defined if the message was + // successfully delivered and RequiredAcks is not NoResponse. Timestamp time.Time - retries int - flags flagSet - expectation chan *ProducerError + retries int + flags flagSet + expectation chan *ProducerError + sequenceNumber int32 + producerEpoch int16 + hasSequence bool } const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. @@ -174,6 +251,9 @@ func (m *ProducerMessage) byteSize(version int) int { func (m *ProducerMessage) clear() { m.flags = 0 m.retries = 0 + m.sequenceNumber = 0 + m.producerEpoch = 0 + m.hasSequence = false } // ProducerError is the type of error generated when the producer fails to deliver a message. @@ -187,6 +267,10 @@ func (pe ProducerError) Error() string { return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) } +func (pe ProducerError) Unwrap() error { + return pe.Err +} + // ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. // It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel // when closing a producer. 
@@ -268,6 +352,10 @@ func (p *asyncProducer) dispatcher() { p.inFlight.Add(1) } + for _, interceptor := range p.conf.Producer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } + version := 1 if p.conf.Version.IsAtLeast(V0_11_0_0) { version = 2 @@ -347,7 +435,7 @@ func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { var partitions []int32 err := tp.breaker.Run(func() (err error) { - var requiresConsistency = false + requiresConsistency := false if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok { requiresConsistency = ep.MessageRequiresConsistency(msg) } else { @@ -394,9 +482,9 @@ type partitionProducer struct { partition int32 input <-chan *ProducerMessage - leader *Broker - breaker *breaker.Breaker - output chan<- *ProducerMessage + leader *Broker + breaker *breaker.Breaker + brokerProducer *brokerProducer // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, // all other messages get buffered in retryState[msg.retries].buf to preserve ordering @@ -426,21 +514,53 @@ func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan return input } +func (pp *partitionProducer) backoff(retries int) { + var backoff time.Duration + if pp.parent.conf.Producer.Retry.BackoffFunc != nil { + maxRetries := pp.parent.conf.Producer.Retry.Max + backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries) + } else { + backoff = pp.parent.conf.Producer.Retry.Backoff + } + if backoff > 0 { + time.Sleep(backoff) + } +} + func (pp *partitionProducer) dispatch() { // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` // on the first message pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) if pp.leader != nil { - pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't 
shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} } + defer func() { + if pp.brokerProducer != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + } + }() + for msg := range pp.input { + if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil { + select { + case <-pp.brokerProducer.abandoned: + // a message on the abandoned channel means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + default: + // producer connection is still open. + } + } + if msg.retries > pp.highWatermark { // a new, higher, retry level; handle it and then back off pp.newHighWatermark(msg.retries) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + pp.backoff(msg.retries) } else if pp.highWatermark > 0 { // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level if msg.retries < pp.highWatermark { @@ -465,20 +585,25 @@ func (pp *partitionProducer) dispatch() { // if we made it this far then the current msg contains real data, and can be sent to the next goroutine // without breaking any of our ordering guarantees - if pp.output == nil { + if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnError(msg, err) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + pp.backoff(msg.retries) continue } Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) } - pp.output <- msg - } + // Now that we know we have a broker to actually try and send this message to, generate the sequence + // number for it. 
+ // All messages being retried (sent or not) have already had their retry count updated + // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer. + if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 { + msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition) + msg.hasSequence = true + } - if pp.output != nil { - pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.brokerProducer.input <- msg } } @@ -490,12 +615,12 @@ func (pp *partitionProducer) newHighWatermark(hwm int) { // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) pp.retryState[pp.highWatermark].expectChaser = true pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} // a new HWM means that our current broker selection is out of date Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - pp.output = nil + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil } func (pp *partitionProducer) flushRetryBuffers() { @@ -503,7 +628,7 @@ func (pp *partitionProducer) flushRetryBuffers() { for { pp.highWatermark-- - if pp.output == nil { + if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) goto flushDone @@ -512,7 +637,7 @@ func (pp *partitionProducer) flushRetryBuffers() { } for _, msg := range pp.retryState[pp.highWatermark].buf { - pp.output <- msg + pp.brokerProducer.input <- msg } flushDone: @@ -537,16 +662,16 @@ func (pp 
*partitionProducer) updateLeader() error { return err } - pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} return nil }) } // one per broker; also constructs an associated flusher -func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { +func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { var ( input = make(chan *ProducerMessage) bridge = make(chan *produceSet) @@ -559,6 +684,7 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag input: input, output: bridge, responses: responses, + stopchan: make(chan struct{}), buffer: newProduceSet(p), currentRetries: make(map[string]map[int32]error), } @@ -580,7 +706,11 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag close(responses) }) - return input + if p.conf.Producer.Retry.Max <= 0 { + bp.abandoned = make(chan struct{}) + } + + return bp } type brokerProducerResponse struct { @@ -595,9 +725,11 @@ type brokerProducer struct { parent *asyncProducer broker *Broker - input <-chan *ProducerMessage + input chan *ProducerMessage output chan<- *produceSet responses <-chan *brokerProducerResponse + abandoned chan struct{} + stopchan chan struct{} buffer *produceSet timer <-chan time.Time @@ -613,12 +745,17 @@ func (bp *brokerProducer) run() { for { select { - case msg := <-bp.input: - if msg == nil { + case msg, ok := <-bp.input: + if !ok { + Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID()) bp.shutdown() return } + if msg == nil { + continue + } + if msg.flags&syn == syn { Logger.Printf("producer/broker/%d state change to [open] on 
%s/%d\n", bp.broker.ID(), msg.Topic, msg.Partition) @@ -644,12 +781,21 @@ func (bp *brokerProducer) run() { } if bp.buffer.wouldOverflow(msg) { - if err := bp.waitForSpace(msg); err != nil { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, false); err != nil { bp.parent.retryMessage(msg, err) continue } } + if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch { + // The epoch was reset, need to roll the buffer over + Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, true); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } if err := bp.buffer.add(msg); err != nil { bp.parent.returnError(msg, err) continue @@ -662,8 +808,14 @@ func (bp *brokerProducer) run() { bp.timerFired = true case output <- bp.buffer: bp.rollOver() - case response := <-bp.responses: - bp.handleResponse(response) + case response, ok := <-bp.responses: + if ok { + bp.handleResponse(response) + } + case <-bp.stopchan: + Logger.Printf( + "producer/broker/%d run loop asked to stop\n", bp.broker.ID()) + return } if bp.timerFired || bp.buffer.readyToFlush() { @@ -687,7 +839,7 @@ func (bp *brokerProducer) shutdown() { for response := range bp.responses { bp.handleResponse(response) } - + close(bp.stopchan) Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) } @@ -699,9 +851,7 @@ func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { return bp.currentRetries[msg.Topic][msg.Partition] } -func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { - Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) - +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error { for { select { case response := <-bp.responses: @@ -709,7 +859,7 @@ func (bp *brokerProducer) waitForSpace(msg 
*ProducerMessage) error { // handling a response can change our state, so re-check some things if reason := bp.needsRetry(msg); reason != nil { return reason - } else if !bp.buffer.wouldOverflow(msg) { + } else if !bp.buffer.wouldOverflow(msg) && !forceRollover { return nil } case bp.output <- bp.buffer: @@ -740,16 +890,17 @@ func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { // we iterate through the blocks in the request set, not the response, so that we notice // if the response is missing a block completely - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + var retryTopics []string + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { if response == nil { // this only happens when RequiredAcks is NoResponse, so we have to assume success - bp.parent.returnSuccesses(msgs) + bp.parent.returnSuccesses(pSet.msgs) return } block := response.GetBlock(topic, partition) if block == nil { - bp.parent.returnErrors(msgs, ErrIncompleteResponse) + bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse) return } @@ -757,45 +908,115 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo // Success case ErrNoError: if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { - for _, msg := range msgs { + for _, msg := range pSet.msgs { msg.Timestamp = block.Timestamp } } - for i, msg := range msgs { + for i, msg := range pSet.msgs { msg.Offset = block.Offset + int64(i) } - bp.parent.returnSuccesses(msgs) + bp.parent.returnSuccesses(pSet.msgs) + // Duplicate + case ErrDuplicateSequenceNumber: + bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: - Logger.Printf("producer/broker/%d 
state change to [retrying] on %s/%d because %v\n", - bp.broker.ID(), topic, partition, block.Err) - bp.currentRetries[topic][partition] = block.Err - bp.parent.retryMessages(msgs, block.Err) - bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + bp.parent.returnErrors(pSet.msgs, block.Err) + } else { + retryTopics = append(retryTopics, topic) + } // Other non-retriable errors default: - bp.parent.returnErrors(msgs, block.Err) + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + } + bp.parent.returnErrors(pSet.msgs, block.Err) } }) + + if len(retryTopics) > 0 { + if bp.parent.conf.Producer.Idempotent { + err := bp.parent.client.RefreshMetadata(retryTopics...) + if err != nil { + Logger.Printf("Failed refreshing metadata because of %v\n", err) + } + } + + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + block := response.GetBlock(topic, partition) + if block == nil { + // handled in the previous "eachPartition" loop + return + } + + switch block.Err { + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", + bp.broker.ID(), topic, partition, block.Err) + if bp.currentRetries[topic] == nil { + bp.currentRetries[topic] = make(map[int32]error) + } + bp.currentRetries[topic][partition] = block.Err + if bp.parent.conf.Producer.Idempotent { + go bp.parent.retryBatch(topic, partition, pSet, block.Err) + } else { + bp.parent.retryMessages(pSet.msgs, block.Err) + } + // dropping the following messages has the side effect of incrementing their retry count + bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + } + }) + } +} + +func (p *asyncProducer) 
retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) { + Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr) + produceSet := newProduceSet(p) + produceSet.msgs[topic] = make(map[int32]*partitionSet) + produceSet.msgs[topic][partition] = pSet + produceSet.bufferBytes += pSet.bufferBytes + produceSet.bufferCount += len(pSet.msgs) + for _, msg := range pSet.msgs { + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, kerr) + return + } + msg.retries++ + } + + // it's expected that a metadata refresh has been requested prior to calling retryBatch + leader, err := p.client.Leader(topic, partition) + if err != nil { + Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err) + for _, msg := range pSet.msgs { + p.returnError(msg, kerr) + } + return + } + bp := p.getBrokerProducer(leader) + bp.output <- produceSet } func (bp *brokerProducer) handleError(sent *produceSet, err error) { switch err.(type) { case PacketEncodingError: - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.returnErrors(msgs, err) + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + bp.parent.returnErrors(pSet.msgs, err) }) default: Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) bp.parent.abandonBrokerConnection(bp.broker) _ = bp.broker.Close() bp.closing = err - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + bp.parent.retryMessages(pSet.msgs, err) }) - bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) + bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + bp.parent.retryMessages(pSet.msgs, err) }) 
bp.rollOver() } @@ -837,11 +1058,9 @@ func (p *asyncProducer) shutdown() { p.inFlight.Wait() - if p.ownClient { - err := p.client.Close() - if err != nil { - Logger.Println("producer/shutdown failed to close the embedded client:", err) - } + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) } close(p.input) @@ -851,6 +1070,12 @@ func (p *asyncProducer) shutdown() { } func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + // We need to reset the producer ID epoch if we set a sequence number on it, because the broker + // will never see a message with this number, so we can never continue the sequence. + if msg.hasSequence { + Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition) + p.txnmgr.bumpEpoch() + } msg.clear() pErr := &ProducerError{Msg: msg, Err: err} if p.conf.Producer.Return.Errors { @@ -892,7 +1117,7 @@ func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { } } -func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { +func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer { p.brokerLock.Lock() defer p.brokerLock.Unlock() @@ -909,13 +1134,13 @@ func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessag return bp } -func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) { p.brokerLock.Lock() defer p.brokerLock.Unlock() p.brokerRefs[bp]-- if p.brokerRefs[bp] == 0 { - close(bp) + close(bp.input) delete(p.brokerRefs, bp) if p.brokers[broker] == bp { @@ -928,5 +1153,10 @@ func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { p.brokerLock.Lock() defer p.brokerLock.Unlock() + bc, ok := p.brokers[broker] + if ok && bc.abandoned != nil { + close(bc.abandoned) + } + delete(p.brokers, broker) } diff --git 
a/vendor/github.com/Shopify/sarama/async_producer_test.go b/vendor/github.com/Shopify/sarama/async_producer_test.go deleted file mode 100644 index 478dca4c..00000000 --- a/vendor/github.com/Shopify/sarama/async_producer_test.go +++ /dev/null @@ -1,845 +0,0 @@ -package sarama - -import ( - "errors" - "log" - "os" - "os/signal" - "sync" - "testing" - "time" -) - -const TestMessage = "ABC THE MESSAGE" - -func closeProducer(t *testing.T, p AsyncProducer) { - var wg sync.WaitGroup - p.AsyncClose() - - wg.Add(2) - go func() { - for range p.Successes() { - t.Error("Unexpected message on Successes()") - } - wg.Done() - }() - go func() { - for msg := range p.Errors() { - t.Error(msg.Err) - } - wg.Done() - }() - wg.Wait() -} - -func expectResults(t *testing.T, p AsyncProducer, successes, errors int) { - expect := successes + errors - for expect > 0 { - select { - case msg := <-p.Errors(): - if msg.Msg.flags != 0 { - t.Error("Message had flags set") - } - errors-- - expect-- - if errors < 0 { - t.Error(msg.Err) - } - case msg := <-p.Successes(): - if msg.flags != 0 { - t.Error("Message had flags set") - } - successes-- - expect-- - if successes < 0 { - t.Error("Too many successes") - } - } - } - if successes != 0 || errors != 0 { - t.Error("Unexpected successes", successes, "or errors", errors) - } -} - -type testPartitioner chan *int32 - -func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) { - part := <-p - if part == nil { - return 0, errors.New("BOOM") - } - - return *part, nil -} - -func (p testPartitioner) RequiresConsistency() bool { - return true -} - -func (p testPartitioner) feed(partition int32) { - p <- &partition -} - -type flakyEncoder bool - -func (f flakyEncoder) Length() int { - return len(TestMessage) -} - -func (f flakyEncoder) Encode() ([]byte, error) { - if !bool(f) { - return nil, errors.New("flaky encoding error") - } - return []byte(TestMessage), nil -} - -func TestAsyncProducer(t *testing.T) { - seedBroker := 
NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i} - } - for i := 0; i < 10; i++ { - select { - case msg := <-producer.Errors(): - t.Error(msg.Err) - if msg.Msg.flags != 0 { - t.Error("Message had flags set") - } - case msg := <-producer.Successes(): - if msg.flags != 0 { - t.Error("Message had flags set") - } - if msg.Metadata.(int) != i { - t.Error("Message metadata did not match") - } - case <-time.After(time.Second): - t.Errorf("Timeout waiting for msg #%d", i) - goto done - } - } -done: - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerMultipleFlushes(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - producer, err := 
NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for flush := 0; flush < 3; flush++ { - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 5, 0) - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerMultipleBrokers(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader0 := NewMockBroker(t, 2) - leader1 := NewMockBroker(t, 3) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID()) - metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodResponse0 := new(ProduceResponse) - prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError) - leader0.Returns(prodResponse0) - - prodResponse1 := new(ProduceResponse) - prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError) - leader1.Returns(prodResponse1) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - config.Producer.Partitioner = NewRoundRobinPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 10, 0) - - closeProducer(t, producer) - leader1.Close() - leader0.Close() - seedBroker.Close() -} - -func TestAsyncProducerCustomPartitioner(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - 
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodResponse := new(ProduceResponse) - prodResponse.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 2 - config.Producer.Return.Successes = true - config.Producer.Partitioner = func(topic string) Partitioner { - p := make(testPartitioner) - go func() { - p.feed(0) - p <- nil - p <- nil - p <- nil - p.feed(0) - }() - return p - } - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 2, 3) - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerFailureRetry(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader1 := NewMockBroker(t, 2) - leader2 := NewMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - seedBroker.Close() - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader1.Returns(prodNotLeader) - - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - 
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - leader1.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - leader1.Close() - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - leader2.Close() - closeProducer(t, producer) -} - -func TestAsyncProducerEncoderFailures(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 1 - config.Producer.Return.Successes = true - config.Producer.Partitioner = NewManualPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for flush := 0; flush < 3; flush++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)} - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)} - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)} - expectResults(t, producer, 1, 2) - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -// If a Kafka broker becomes unavailable and then returns back in service, then -// producer reconnects to it and continues 
sending messages. -func TestAsyncProducerBrokerBounce(t *testing.T) { - // Given - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - - config := NewConfig() - config.Producer.Flush.Messages = 1 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // When: a broker connection gets reset by a broker (network glitch, restart, you name it). - leader.Close() // producer should get EOF - leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles - seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again - - // Then: a produced message goes through the new broker connection. 
- producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader1 := NewMockBroker(t, 2) - leader2 := NewMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Max = 3 - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader1.Close() // producer should get EOF - seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down - seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down - - // ok fine, tell it to go to leader2 finally - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - seedBroker.Close() - leader2.Close() - - closeProducer(t, producer) -} - -func TestAsyncProducerMultipleRetries(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader1 := NewMockBroker(t, 2) - leader2 := 
NewMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Max = 4 - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader1.Returns(prodNotLeader) - - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader2) - leader2.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader1) - leader1.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader1) - leader1.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - seedBroker.Close() - leader1.Close() - leader2.Close() - closeProducer(t, producer) -} - -func TestAsyncProducerOutOfRetries(t *testing.T) { - t.Skip("Enable once bug #294 is fixed.") - - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), 
leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - for i := 0; i < 10; i++ { - select { - case msg := <-producer.Errors(): - if msg.Err != ErrNotLeaderForPartition { - t.Error(msg.Err) - } - case <-producer.Successes(): - t.Error("Unexpected success") - } - } - - seedBroker.Returns(metadataResponse) - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - expectResults(t, producer, 10, 0) - - leader.Close() - seedBroker.Close() - safeClose(t, producer) -} - -func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 1 - 
config.Producer.Partitioner = NewRoundRobinPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // prime partition 0 - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // prime partition 1 - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - prodSuccess = new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // reboot the broker (the producer will get EOF on its existing connection) - leader.Close() - leader = NewMockBrokerAddr(t, 2, leaderAddr) - - // send another message on partition 0 to trigger the EOF and retry - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - - // tell partition 0 to go to that broker again - seedBroker.Returns(metadataResponse) - - // succeed this time - prodSuccess = new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // shutdown - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerFlusherRetryCondition(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - 
config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 1 - config.Producer.Partitioner = NewManualPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // prime partitions - for p := int32(0); p < 2; p++ { - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p} - } - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", p, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 5, 0) - } - - // send more messages on partition 0 - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - time.Sleep(50 * time.Millisecond) - - leader.SetHandlerByMap(map[string]MockResponse{ - "ProduceRequest": NewMockProduceResponse(t). - SetVersion(0). 
- SetError("my_topic", 0, ErrNoError), - }) - - // tell partition 0 to go to that broker again - seedBroker.Returns(metadataResponse) - - // succeed this time - expectResults(t, producer, 5, 0) - - // put five more through - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} - } - expectResults(t, producer, 5, 0) - - // shutdown - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerRetryShutdown(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataLeader := new(MetadataResponse) - metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) - metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - producer.AsyncClose() - time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in - - producer.Input() <- &ProducerMessage{Topic: "FOO"} - if err := <-producer.Errors(); err.Err != ErrShuttingDown { - t.Error(err) - } - - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - seedBroker.Returns(metadataLeader) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - seedBroker.Close() - leader.Close() - - // wait for the async-closed producer to shut down fully - for err := range producer.Errors() { - t.Error(err) - } -} - -func 
TestAsyncProducerNoReturns(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataLeader := new(MetadataResponse) - metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) - metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = false - config.Producer.Return.Errors = false - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - wait := make(chan bool) - go func() { - if err := producer.Close(); err != nil { - t.Error(err) - } - close(wait) - }() - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - <-wait - seedBroker.Close() - leader.Close() -} - -// This example shows how to use the producer while simultaneously -// reading the Errors channel to know about any failures. -func ExampleAsyncProducer_select() { - producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) - if err != nil { - panic(err) - } - - defer func() { - if err := producer.Close(); err != nil { - log.Fatalln(err) - } - }() - - // Trap SIGINT to trigger a shutdown. 
- signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - var enqueued, errors int -ProducerLoop: - for { - select { - case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}: - enqueued++ - case err := <-producer.Errors(): - log.Println("Failed to produce message", err) - errors++ - case <-signals: - break ProducerLoop - } - } - - log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors) -} - -// This example shows how to use the producer with separate goroutines -// reading from the Successes and Errors channels. Note that in order -// for the Successes channel to be populated, you have to set -// config.Producer.Return.Successes to true. -func ExampleAsyncProducer_goroutines() { - config := NewConfig() - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{"localhost:9092"}, config) - if err != nil { - panic(err) - } - - // Trap SIGINT to trigger a graceful shutdown. - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - var ( - wg sync.WaitGroup - enqueued, successes, errors int - ) - - wg.Add(1) - go func() { - defer wg.Done() - for range producer.Successes() { - successes++ - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for err := range producer.Errors() { - log.Println(err) - errors++ - } - }() - -ProducerLoop: - for { - message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} - select { - case producer.Input() <- message: - enqueued++ - - case <-signals: - producer.AsyncClose() // Trigger a shutdown of the producer. 
- break ProducerLoop - } - } - - wg.Wait() - - log.Printf("Successfully produced: %d; errors: %d\n", successes, errors) -} diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go index e78988d7..8f7634f9 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -1,8 +1,25 @@ package sarama import ( + "container/heap" + "errors" + "fmt" "math" "sort" + "strings" +) + +const ( + // RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy + RangeBalanceStrategyName = "range" + + // RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy + RoundRobinBalanceStrategyName = "roundrobin" + + // StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy + StickyBalanceStrategyName = "sticky" + + defaultGeneration = -1 ) // BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt. @@ -24,7 +41,7 @@ func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) { // -------------------------------------------------------------------- // BalanceStrategy is used to balance topics and partitions -// across memebers of a consumer group +// across members of a consumer group type BalanceStrategy interface { // Name uniquely identifies the strategy. Name() string @@ -32,6 +49,10 @@ type BalanceStrategy interface { // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions` // and returns a distribution plan. 
Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) + + // AssignmentData returns the serialized assignment data for the specified + // memberID + AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) } // -------------------------------------------------------------------- @@ -41,7 +62,7 @@ type BalanceStrategy interface { // M1: {T: [0, 1, 2]} // M2: {T: [3, 4, 5]} var BalanceStrategyRange = &balanceStrategy{ - name: "range", + name: RangeBalanceStrategyName, coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { step := float64(len(partitions)) / float64(len(memberIDs)) @@ -54,19 +75,18 @@ var BalanceStrategyRange = &balanceStrategy{ }, } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// while maintain a balanced partition distribution. // Example with topic T with six partitions (0..5) and two members (M1, M2): // M1: {T: [0, 2, 4]} // M2: {T: [1, 3, 5]} -var BalanceStrategyRoundRobin = &balanceStrategy{ - name: "roundrobin", - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - for i, part := range partitions { - memberID := memberIDs[i%len(memberIDs)] - plan.Add(memberID, topic, part) - } - }, -} +// +// On reassignment with an additional consumer, you might get an assignment plan like: +// M1: {T: [0, 2]} +// M2: {T: [1, 3]} +// M3: {T: [4, 5]} +// +var BalanceStrategySticky = &stickyBalanceStrategy{} // -------------------------------------------------------------------- @@ -78,7 +98,7 @@ type balanceStrategy struct { // Name implements BalanceStrategy. func (s *balanceStrategy) Name() string { return s.name } -// Balance implements BalanceStrategy. +// Plan implements BalanceStrategy. 
func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { // Build members by topic map mbt := make(map[string][]string) @@ -104,6 +124,11 @@ func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, t return plan, nil } +// AssignmentData simple strategies do not require any shared assignment data +func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return nil, nil +} + type balanceStrategySortable struct { topic string memberIDs []string @@ -127,3 +152,984 @@ func balanceStrategyHashValue(vv ...string) uint32 { } return h } + +type stickyBalanceStrategy struct { + movements partitionMovements +} + +// Name implements BalanceStrategy. +func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName } + +// Plan implements BalanceStrategy. +func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + // track partition movements during generation of the partition assignment plan + s.movements = partitionMovements{ + Movements: make(map[topicPartitionAssignment]consumerPair), + PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool), + } + + // prepopulate the current assignment state from userdata on the consumer group members + currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members) + if err != nil { + return nil, err + } + + // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state + isFreshAssignment := false + if len(currentAssignment) == 0 { + isFreshAssignment = true + } + + // create a mapping of all current topic partitions and the consumers that can be assigned to them + partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) + for topic, partitions := 
range topics { + for _, partition := range partitions { + partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{} + } + } + + // create a mapping of all consumers to all potential topic partitions that can be assigned to them + // also, populate the mapping of partitions to potential consumers + consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members)) + for memberID, meta := range members { + consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0) + for _, topicSubscription := range meta.Topics { + // only evaluate topic subscriptions that are present in the supplied topics map + if _, found := topics[topicSubscription]; found { + for _, partition := range topics[topicSubscription] { + topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition} + consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition) + partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID) + } + } + } + + // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist + if _, exists := currentAssignment[memberID]; !exists { + currentAssignment[memberID] = make([]topicPartitionAssignment, 0) + } + } + + // create a mapping of each partition to its current consumer, where possible + currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment)) + unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers)) + for partition := range partition2AllPotentialConsumers { + unvisitedPartitions[partition] = true + } + var unassignedPartitions []topicPartitionAssignment + for memberID, partitions := range currentAssignment { + var keepPartitions []topicPartitionAssignment + for _, partition := range partitions { + // If this partition no 
longer exists at all, likely due to the + // topic being deleted, we remove the partition from the member. + if _, exists := partition2AllPotentialConsumers[partition]; !exists { + continue + } + delete(unvisitedPartitions, partition) + currentPartitionConsumers[partition] = memberID + + if !strsContains(members[memberID].Topics, partition.Topic) { + unassignedPartitions = append(unassignedPartitions, partition) + continue + } + keepPartitions = append(keepPartitions, partition) + } + currentAssignment[memberID] = keepPartitions + } + for unvisited := range unvisitedPartitions { + unassignedPartitions = append(unassignedPartitions, unvisited) + } + + // sort the topic partitions in order of priority for reassignment + sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions) + + // at this point we have preserved all valid topic partition to consumer assignments and removed + // all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions + // to consumers so that the topic partition assignments are as balanced as possible. 
+ + // an ascending sorted set of consumers based on how many topic partitions are already assigned to them + sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment) + s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers) + + // Assemble plan + plan := make(BalanceStrategyPlan, len(currentAssignment)) + for memberID, assignments := range currentAssignment { + if len(assignments) == 0 { + plan[memberID] = make(map[string][]int32) + } else { + for _, assignment := range assignments { + plan.Add(memberID, assignment.Topic, assignment.Partition) + } + } + } + return plan, nil +} + +// AssignmentData serializes the set of topics currently assigned to the +// specified member as part of the supplied balance plan +func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return encode(&StickyAssignorUserDataV1{ + Topics: topics, + Generation: generationID, + }, nil) +} + +func strsContains(s []string, value string) bool { + for _, entry := range s { + if entry == value { + return true + } + } + return false +} + +// Balance assignments across consumers for maximum fairness and stickiness. 
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { + initializing := false + if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { + initializing = true + } + + // assign all unassigned partitions + for _, partition := range unassignedPartitions { + // skip if there is no potential consumer for the partition + if len(partition2AllPotentialConsumers[partition]) == 0 { + continue + } + sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer) + } + + // narrow down the reassignment scope to only those partitions that can actually be reassigned + for partition := range partition2AllPotentialConsumers { + if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { + sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition) + } + } + + // narrow down the reassignment scope to only those consumers that are subject to reassignment + fixedAssignments := make(map[string][]topicPartitionAssignment) + for memberID := range consumer2AllPotentialPartitions { + if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) { + fixedAssignments[memberID] = currentAssignment[memberID] + delete(currentAssignment, memberID) + sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment) + } + } + + // create a deep copy of the 
current assignment so we can revert to it if we do not get a more balanced assignment later + preBalanceAssignment := deepCopyAssignment(currentAssignment) + preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer)) + for k, v := range currentPartitionConsumer { + preBalancePartitionConsumers[k] = v + } + + reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer) + + // if we are not preserving existing assignments and we have made changes to the current assignment + // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment + if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) { + currentAssignment = deepCopyAssignment(preBalanceAssignment) + currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers)) + for k, v := range preBalancePartitionConsumers { + currentPartitionConsumer[k] = v + } + } + + // add the fixed assignments (those that could not change) back + for consumer, assignments := range fixedAssignments { + currentAssignment[consumer] = assignments + } +} + +// BalanceStrategyRoundRobin assigns partitions to members in alternating order. 
+// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): +// M0: [t0p0, t0p2, t1p1] +// M1: [t0p1, t1p0, t1p2] +var BalanceStrategyRoundRobin = new(roundRobinBalancer) + +type roundRobinBalancer struct{} + +func (b *roundRobinBalancer) Name() string { + return RoundRobinBalanceStrategyName +} + +func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + if len(memberAndMetadata) == 0 || len(topics) == 0 { + return nil, errors.New("members and topics are not provided") + } + // sort partitions + var topicPartitions []topicAndPartition + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition}) + } + } + sort.SliceStable(topicPartitions, func(i, j int) bool { + pi := topicPartitions[i] + pj := topicPartitions[j] + return pi.comparedValue() < pj.comparedValue() + }) + + // sort members + var members []memberAndTopic + for memberID, meta := range memberAndMetadata { + m := memberAndTopic{ + memberID: memberID, + topics: make(map[string]struct{}), + } + for _, t := range meta.Topics { + m.topics[t] = struct{}{} + } + members = append(members, m) + } + sort.SliceStable(members, func(i, j int) bool { + mi := members[i] + mj := members[j] + return mi.memberID < mj.memberID + }) + + // assign partitions + plan := make(BalanceStrategyPlan, len(members)) + i := 0 + n := len(members) + for _, tp := range topicPartitions { + m := members[i%n] + for !m.hasTopic(tp.topic) { + i++ + m = members[i%n] + } + plan.Add(m.memberID, tp.topic, tp.partition) + i++ + } + return plan, nil +} + +func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return nil, nil // do nothing for now +} + +type topicAndPartition struct { + topic string + 
partition int32 +} + +func (tp *topicAndPartition) comparedValue() string { + return fmt.Sprintf("%s-%d", tp.topic, tp.partition) +} + +type memberAndTopic struct { + memberID string + topics map[string]struct{} +} + +func (m *memberAndTopic) hasTopic(topic string) bool { + _, isExist := m.topics[topic] + return isExist +} + +// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs. +// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0. +// Lower balance score indicates a more balanced assignment. +func getBalanceScore(assignment map[string][]topicPartitionAssignment) int { + consumer2AssignmentSize := make(map[string]int, len(assignment)) + for memberID, partitions := range assignment { + consumer2AssignmentSize[memberID] = len(partitions) + } + + var score float64 + for memberID, consumerAssignmentSize := range consumer2AssignmentSize { + delete(consumer2AssignmentSize, memberID) + for _, otherConsumerAssignmentSize := range consumer2AssignmentSize { + score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize)) + } + } + return int(score) +} + +// Determine whether the current assignment plan is balanced. 
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool { + sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment) + min := len(currentAssignment[sortedCurrentSubscriptions[0]]) + max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]]) + if min >= max-1 { + // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true + return true + } + + // create a mapping from partitions to the consumer assigned to them + allPartitions := make(map[topicPartitionAssignment]string) + for memberID, partitions := range currentAssignment { + for _, partition := range partitions { + if _, exists := allPartitions[partition]; exists { + Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition) + } + allPartitions[partition] = memberID + } + } + + // for each consumer that does not have all the topic partitions it can get make sure none of the topic partitions it + // could but did not get cannot be moved to it (because that would break the balance) + for _, memberID := range sortedCurrentSubscriptions { + consumerPartitions := currentAssignment[memberID] + consumerPartitionCount := len(consumerPartitions) + + // skip if this consumer already has all the topic partitions it can get + if consumerPartitionCount == len(allSubscriptions[memberID]) { + continue + } + + // otherwise make sure it cannot get any more + potentialTopicPartitions := allSubscriptions[memberID] + for _, partition := range potentialTopicPartitions { + if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) { + otherConsumer := allPartitions[partition] + otherConsumerPartitionCount := len(currentAssignment[otherConsumer]) + if consumerPartitionCount < otherConsumerPartitionCount { + return false + } + } + } + } + return true +} + +// Reassign all topic 
partitions that need reassignment until balanced. +func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool { + reassignmentPerformed := false + modified := false + + // repeat reassignment until no partition can be moved to improve the balance + for { + modified = false + // reassign all reassignable partitions (starting from the partition with least potential consumers and if needed) + // until the full list is processed or a balance is achieved + for _, partition := range reassignablePartitions { + if isBalanced(currentAssignment, consumer2AllPotentialPartitions) { + break + } + + // the partition must have at least two consumers + if len(partition2AllPotentialConsumers[partition]) <= 1 { + Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition) + } + + // the partition must have a consumer + consumer := currentPartitionConsumer[partition] + if consumer == "" { + Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition) + } + + if _, exists := prevAssignment[partition]; exists { + if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) { + sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID) + reassignmentPerformed = true + modified = true + continue + } + } + + // check if a better-suited consumer exists for the partition; if so, reassign it + for _, otherConsumer := 
range partition2AllPotentialConsumers[partition] { + if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) { + sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions) + reassignmentPerformed = true + modified = true + break + } + } + } + if !modified { + return reassignmentPerformed + } + } +} + +// Identify a new consumer for a topic partition and reassign it. +func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string { + for _, anotherConsumer := range sortedCurrentSubscriptions { + if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) { + return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer) + } + } + return sortedCurrentSubscriptions +} + +// Reassign a specific partition to a new consumer +func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string { + consumer := currentPartitionConsumer[partition] + // find the correct partition movement considering the stickiness requirement + partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer) + return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer) +} + +// Track the movement of a topic partition after assignment +func (s *stickyBalanceStrategy) 
processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string { + oldConsumer := currentPartitionConsumer[partition] + s.movements.movePartition(partition, oldConsumer, newConsumer) + + currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition) + currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition) + currentPartitionConsumer[partition] = newConsumer + return sortMemberIDsByPartitionAssignments(currentAssignment) +} + +// Determine whether a specific consumer should be considered for topic partition assignment. +func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { + currentPartitions := currentAssignment[memberID] + currentAssignmentSize := len(currentPartitions) + maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID]) + if currentAssignmentSize > maxAssignmentSize { + Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID) + } + if currentAssignmentSize < maxAssignmentSize { + // if a consumer is not assigned all its potential partitions it is subject to reassignment + return true + } + for _, partition := range currentPartitions { + if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { + return true + } + } + return false +} + +// Only consider reassigning those topic partitions that have two or more potential consumers. 
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { + return len(partition2AllPotentialConsumers[partition]) >= 2 +} + +// The assignment should improve the overall balance of the partition assignments to consumers. +func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string { + for _, memberID := range sortedCurrentSubscriptions { + if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) { + currentAssignment[memberID] = append(currentAssignment[memberID], partition) + currentPartitionConsumer[partition] = memberID + break + } + } + return sortMemberIDsByPartitionAssignments(currentAssignment) +} + +// Deserialize topic partition assignment data to aid with creation of a sticky assignment. +func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) { + userDataV1 := &StickyAssignorUserDataV1{} + if err := decode(userDataBytes, userDataV1); err != nil { + userDataV0 := &StickyAssignorUserDataV0{} + if err := decode(userDataBytes, userDataV0); err != nil { + return nil, err + } + return userDataV0, nil + } + return userDataV1, nil +} + +// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited +// to those topic partitions currently reported by the Kafka cluster. 
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment { + assignments := deepCopyAssignment(currentAssignment) + for memberID, partitions := range assignments { + // perform in-place filtering + i := 0 + for _, partition := range partitions { + if _, exists := partition2AllPotentialConsumers[partition]; exists { + partitions[i] = partition + i++ + } + } + assignments[memberID] = partitions[:i] + } + return assignments +} + +func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment { + for i, assignment := range assignments { + if assignment == topic { + return append(assignments[:i], assignments[i+1:]...) + } + } + return assignments +} + +func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool { + for _, assignment := range assignments { + if assignment == topic { + return true + } + } + return false +} + +func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment { + unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers)) + for partition := range partition2AllPotentialConsumers { + unassignedPartitions[partition] = true + } + + sortedPartitions := make([]topicPartitionAssignment, 0) + if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) { + // if this is a reassignment and the subscriptions are identical (all consumers can consumer from all topics) + // then we 
just need to simply list partitions in a round robin fashion (from consumers with + // most assigned partitions to those with least) + assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers) + + // use priority-queue to evaluate consumer group members in descending-order based on + // the number of topic partition assignments (i.e. consumers with most assignments first) + pq := make(assignmentPriorityQueue, len(assignments)) + i := 0 + for consumerID, consumerAssignments := range assignments { + pq[i] = &consumerGroupMember{ + id: consumerID, + assignments: consumerAssignments, + } + i++ + } + heap.Init(&pq) + + for { + // loop until no consumer-group members remain + if pq.Len() == 0 { + break + } + member := pq[0] + + // partitions that were assigned to a different consumer last time + var prevPartitionIndex int + for i, partition := range member.assignments { + if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists { + prevPartitionIndex = i + break + } + } + + if len(member.assignments) > 0 { + partition := member.assignments[prevPartitionIndex] + sortedPartitions = append(sortedPartitions, partition) + delete(unassignedPartitions, partition) + if prevPartitionIndex == 0 { + member.assignments = member.assignments[1:] + } else { + member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...) 
+ } + heap.Fix(&pq, 0) + } else { + heap.Pop(&pq) + } + } + + for partition := range unassignedPartitions { + sortedPartitions = append(sortedPartitions, partition) + } + } else { + // an ascending sorted set of topic partitions based on how many consumers can potentially use them + sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers) + } + return sortedPartitions +} + +func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string { + // sort the members by the number of partition assignments in ascending order + sortedMemberIDs := make([]string, 0, len(assignments)) + for memberID := range assignments { + sortedMemberIDs = append(sortedMemberIDs, memberID) + } + sort.SliceStable(sortedMemberIDs, func(i, j int) bool { + ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]]) + if ret == 0 { + return sortedMemberIDs[i] < sortedMemberIDs[j] + } + return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]]) + }) + return sortedMemberIDs +} + +func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment { + // sort the members by the number of partition assignments in descending order + sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers)) + i := 0 + for partition := range partition2AllPotentialConsumers { + sortedPartionIDs[i] = partition + i++ + } + sort.Slice(sortedPartionIDs, func(i, j int) bool { + if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) { + ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic) + if ret == 0 { + return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition + } + return ret < 0 + } + return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < 
len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) + }) + return sortedPartionIDs +} + +func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment { + m := make(map[string][]topicPartitionAssignment, len(assignment)) + for memberID, subscriptions := range assignment { + m[memberID] = append(subscriptions[:0:0], subscriptions...) + } + return m +} + +func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool { + curMembers := make(map[string]int) + for _, cur := range partition2AllPotentialConsumers { + if len(curMembers) == 0 { + for _, curMembersElem := range cur { + curMembers[curMembersElem]++ + } + continue + } + + if len(curMembers) != len(cur) { + return false + } + + yMap := make(map[string]int) + for _, yElem := range cur { + yMap[yElem]++ + } + + for curMembersMapKey, curMembersMapVal := range curMembers { + if yMap[curMembersMapKey] != curMembersMapVal { + return false + } + } + } + + curPartitions := make(map[topicPartitionAssignment]int) + for _, cur := range consumer2AllPotentialPartitions { + if len(curPartitions) == 0 { + for _, curPartitionElem := range cur { + curPartitions[curPartitionElem]++ + } + continue + } + + if len(curPartitions) != len(cur) { + return false + } + + yMap := make(map[topicPartitionAssignment]int) + for _, yElem := range cur { + yMap[yElem]++ + } + + for curMembersMapKey, curMembersMapVal := range curPartitions { + if yMap[curMembersMapKey] != curMembersMapVal { + return false + } + } + } + return true +} + +// We need to process subscriptions' user data with each consumer's reported generation in mind +// higher generations overwrite lower generations in case of a conflict +// note that a conflict could exist only if user data is for different generations +func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) 
(map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) { + currentAssignment := make(map[string][]topicPartitionAssignment) + prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair) + + // for each partition we create a sorted map of its consumers by generation + sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string) + for memberID, meta := range members { + consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData) + if err != nil { + return nil, nil, err + } + for _, partition := range consumerUserData.partitions() { + if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists { + if consumerUserData.hasGeneration() { + if _, generationExists := consumers[consumerUserData.generation()]; generationExists { + // same partition is assigned to two consumers during the same rebalance. + // log a warning and skip this record + Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation()) + continue + } else { + consumers[consumerUserData.generation()] = memberID + } + } else { + consumers[defaultGeneration] = memberID + } + } else { + generation := defaultGeneration + if consumerUserData.hasGeneration() { + generation = consumerUserData.generation() + } + sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID} + } + } + } + + // prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition + // current and previous consumers are the last two consumers of each partition in the above sorted map + for partition, consumers := range sortedPartitionConsumersByGeneration { + // sort consumers by generation in decreasing order + var generations []int + for generation := range consumers { + generations = append(generations, generation) + } + 
sort.Sort(sort.Reverse(sort.IntSlice(generations))) + + consumer := consumers[generations[0]] + if _, exists := currentAssignment[consumer]; !exists { + currentAssignment[consumer] = []topicPartitionAssignment{partition} + } else { + currentAssignment[consumer] = append(currentAssignment[consumer], partition) + } + + // check for previous assignment, if any + if len(generations) > 1 { + prevAssignment[partition] = consumerGenerationPair{ + MemberID: consumers[generations[1]], + Generation: generations[1], + } + } + } + return currentAssignment, prevAssignment, nil +} + +type consumerGenerationPair struct { + MemberID string + Generation int +} + +// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment. +type consumerPair struct { + SrcMemberID string + DstMemberID string +} + +// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers. +type partitionMovements struct { + PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool + Movements map[topicPartitionAssignment]consumerPair +} + +func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair { + pair := p.Movements[partition] + delete(p.Movements, partition) + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + delete(partitionMovementsForThisTopic[pair], partition) + if len(partitionMovementsForThisTopic[pair]) == 0 { + delete(partitionMovementsForThisTopic, pair) + } + if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 { + delete(p.PartitionMovementsByTopic, partition.Topic) + } + return pair +} + +func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) { + p.Movements[partition] = pair + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + p.PartitionMovementsByTopic[partition.Topic] = 
make(map[consumerPair]map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + if _, exists := partitionMovementsForThisTopic[pair]; !exists { + partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic[pair][partition] = true +} + +func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) { + pair := consumerPair{ + SrcMemberID: oldConsumer, + DstMemberID: newConsumer, + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + existingPair := p.removeMovementRecordOfPartition(partition) + if existingPair.DstMemberID != oldConsumer { + Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer) + } + if existingPair.SrcMemberID != newConsumer { + // the partition is not moving back to its previous consumer + p.addPartitionMovementRecord(partition, consumerPair{ + SrcMemberID: existingPair.SrcMemberID, + DstMemberID: newConsumer, + }) + } + } else { + p.addPartitionMovementRecord(partition, pair) + } +} + +func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment { + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + return partition + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + if oldConsumer != p.Movements[partition].DstMemberID { + Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer) + } + oldConsumer = p.Movements[partition].SrcMemberID + } + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + reversePair := consumerPair{ + SrcMemberID: newConsumer, + DstMemberID: oldConsumer, + } + if _, exists := 
partitionMovementsForThisTopic[reversePair]; !exists { + return partition + } + var reversePairPartition topicPartitionAssignment + for otherPartition := range partitionMovementsForThisTopic[reversePair] { + reversePairPartition = otherPartition + } + return reversePairPartition +} + +func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { + if src == dst { + return currentPath, false + } + if len(pairs) == 0 { + return currentPath, false + } + for _, pair := range pairs { + if src == pair.SrcMemberID && dst == pair.DstMemberID { + currentPath = append(currentPath, src, dst) + return currentPath, true + } + } + + for _, pair := range pairs { + if pair.SrcMemberID == src { + // create a deep copy of the pairs, excluding the current pair + reducedSet := make([]consumerPair, len(pairs)-1) + i := 0 + for _, p := range pairs { + if p != pair { + reducedSet[i] = pair + i++ + } + } + + currentPath = append(currentPath, pair.SrcMemberID) + return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) + } + } + return currentPath, false +} + +func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { + superCycle := make([]string, len(cycle)-1) + for i := 0; i < len(cycle)-1; i++ { + superCycle[i] = cycle[i] + } + superCycle = append(superCycle, cycle...) 
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, otherPair := range pairs {
+			if otherPair != pair {
+				reducedPairs[i] = otherPair
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there is no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem to be very low (according to various randomized
+	// tests with the given sticky algorithm) that it should not worth the added complexity of handling those cases.
+ for _, cycle := range cycles { + if len(cycle) == 3 { + return true + } + } + return false +} + +func (p *partitionMovements) isSticky() bool { + for topic, movements := range p.PartitionMovementsByTopic { + movementPairs := make([]consumerPair, len(movements)) + i := 0 + for pair := range movements { + movementPairs[i] = pair + i++ + } + if p.hasCycles(movementPairs) { + Logger.Printf("Stickiness is violated for topic %s", topic) + Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements) + return false + } + } + return true +} + +func indexOfSubList(source []string, target []string) int { + targetSize := len(target) + maxCandidate := len(source) - targetSize +nextCand: + for candidate := 0; candidate <= maxCandidate; candidate++ { + j := candidate + for i := 0; i < targetSize; i++ { + if target[i] != source[j] { + // Element mismatch, try next cand + continue nextCand + } + j++ + } + // All elements of candidate matched target + return candidate + } + return -1 +} + +type consumerGroupMember struct { + id string + assignments []topicPartitionAssignment +} + +// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted +// in descending order (most assignments to least assignments). 
+type assignmentPriorityQueue []*consumerGroupMember + +func (pq assignmentPriorityQueue) Len() int { return len(pq) } + +func (pq assignmentPriorityQueue) Less(i, j int) bool { + // order asssignment priority queue in descending order using assignment-count/member-id + if len(pq[i].assignments) == len(pq[j].assignments) { + return strings.Compare(pq[i].id, pq[j].id) > 0 + } + return len(pq[i].assignments) > len(pq[j].assignments) +} + +func (pq assignmentPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *assignmentPriorityQueue) Push(x interface{}) { + member := x.(*consumerGroupMember) + *pq = append(*pq, member) +} + +func (pq *assignmentPriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + member := old[n-1] + *pq = old[0 : n-1] + return member +} diff --git a/vendor/github.com/Shopify/sarama/balance_strategy_test.go b/vendor/github.com/Shopify/sarama/balance_strategy_test.go deleted file mode 100644 index 047157f3..00000000 --- a/vendor/github.com/Shopify/sarama/balance_strategy_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -func TestBalanceStrategyRange(t *testing.T) { - tests := []struct { - members map[string][]string - topics map[string][]int32 - expected BalanceStrategyPlan - }{ - { - members: map[string][]string{"M1": {"T1", "T2"}, "M2": {"T1", "T2"}}, - topics: map[string][]int32{"T1": {0, 1, 2, 3}, "T2": {0, 1, 2, 3}}, - expected: BalanceStrategyPlan{ - "M1": map[string][]int32{"T1": {0, 1}, "T2": {2, 3}}, - "M2": map[string][]int32{"T1": {2, 3}, "T2": {0, 1}}, - }, - }, - { - members: map[string][]string{"M1": {"T1", "T2"}, "M2": {"T1", "T2"}}, - topics: map[string][]int32{"T1": {0, 1, 2}, "T2": {0, 1, 2}}, - expected: BalanceStrategyPlan{ - "M1": map[string][]int32{"T1": {0, 1}, "T2": {2}}, - "M2": map[string][]int32{"T1": {2}, "T2": {0, 1}}, - }, - }, - { - members: map[string][]string{"M1": {"T1"}, "M2": {"T1", "T2"}}, - topics: map[string][]int32{"T1": {0, 
1}, "T2": {0, 1}}, - expected: BalanceStrategyPlan{ - "M1": map[string][]int32{"T1": {0}}, - "M2": map[string][]int32{"T1": {1}, "T2": {0, 1}}, - }, - }, - } - - strategy := BalanceStrategyRange - if strategy.Name() != "range" { - t.Errorf("Unexpected stategy name\nexpected: range\nactual: %v", strategy.Name()) - } - - for _, test := range tests { - members := make(map[string]ConsumerGroupMemberMetadata) - for memberID, topics := range test.members { - members[memberID] = ConsumerGroupMemberMetadata{Topics: topics} - } - - actual, err := strategy.Plan(members, test.topics) - if err != nil { - t.Errorf("Unexpected error %v", err) - } else if !reflect.DeepEqual(actual, test.expected) { - t.Errorf("Plan does not match expectation\nexpected: %#v\nactual: %#v", test.expected, actual) - } - } -} - -func TestBalanceStrategyRoundRobin(t *testing.T) { - tests := []struct { - members map[string][]string - topics map[string][]int32 - expected BalanceStrategyPlan - }{ - { - members: map[string][]string{"M1": {"T1", "T2"}, "M2": {"T1", "T2"}}, - topics: map[string][]int32{"T1": {0, 1, 2, 3}, "T2": {0, 1, 2, 3}}, - expected: BalanceStrategyPlan{ - "M1": map[string][]int32{"T1": {0, 2}, "T2": {1, 3}}, - "M2": map[string][]int32{"T1": {1, 3}, "T2": {0, 2}}, - }, - }, - { - members: map[string][]string{"M1": {"T1", "T2"}, "M2": {"T1", "T2"}}, - topics: map[string][]int32{"T1": {0, 1, 2}, "T2": {0, 1, 2}}, - expected: BalanceStrategyPlan{ - "M1": map[string][]int32{"T1": {0, 2}, "T2": {1}}, - "M2": map[string][]int32{"T1": {1}, "T2": {0, 2}}, - }, - }, - } - - strategy := BalanceStrategyRoundRobin - if strategy.Name() != "roundrobin" { - t.Errorf("Unexpected stategy name\nexpected: range\nactual: %v", strategy.Name()) - } - - for _, test := range tests { - members := make(map[string]ConsumerGroupMemberMetadata) - for memberID, topics := range test.members { - members[memberID] = ConsumerGroupMemberMetadata{Topics: topics} - } - - actual, err := strategy.Plan(members, test.topics) - 
if err != nil { - t.Errorf("Unexpected error %v", err) - } else if !reflect.DeepEqual(actual, test.expected) { - t.Errorf("Plan does not match expectation\nexpected: %#v\nactual: %#v", test.expected, actual) - } - } -} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index 26f63d51..5858a23c 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -6,7 +6,9 @@ import ( "fmt" "io" "net" + "sort" "strconv" + "strings" "sync" "sync/atomic" "time" @@ -16,19 +18,20 @@ import ( // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. type Broker struct { - id int32 - addr string + conf *Config rack *string - conf *Config + id int32 + addr string correlationID int32 conn net.Conn connErr error lock sync.Mutex opened int32 + responses chan responsePromise + done chan bool - responses chan responsePromise - done chan bool + registeredMetrics []string incomingByteRate metrics.Meter requestRate metrics.Meter @@ -37,6 +40,7 @@ type Broker struct { outgoingByteRate metrics.Meter responseRate metrics.Meter responseSize metrics.Histogram + requestsInFlight metrics.Counter brokerIncomingByteRate metrics.Meter brokerRequestRate metrics.Meter brokerRequestSize metrics.Histogram @@ -44,11 +48,78 @@ type Broker struct { brokerOutgoingByteRate metrics.Meter brokerResponseRate metrics.Meter brokerResponseSize metrics.Histogram + brokerRequestsInFlight metrics.Counter + + kerberosAuthenticator GSSAPIKerberosAuth +} + +// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker +type SASLMechanism string + +const ( + // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+) + SASLTypeOAuth = "OAUTHBEARER" + // SASLTypePlaintext represents the SASL/PLAIN mechanism + SASLTypePlaintext = "PLAIN" + // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism. 
+ SASLTypeSCRAMSHA256 = "SCRAM-SHA-256" + // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism. + SASLTypeSCRAMSHA512 = "SCRAM-SHA-512" + SASLTypeGSSAPI = "GSSAPI" + // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL auth using opaque packets. + SASLHandshakeV0 = int16(0) + // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL by wrapping tokens with Kafka protocol headers. + SASLHandshakeV1 = int16(1) + // SASLExtKeyAuth is the reserved extension key name sent as part of the + // SASL/OAUTHBEARER initial client response + SASLExtKeyAuth = "auth" +) + +// AccessToken contains an access token used to authenticate a +// SASL/OAUTHBEARER client along with associated metadata. +type AccessToken struct { + // Token is the access token payload. + Token string + // Extensions is a optional map of arbitrary key-value pairs that can be + // sent with the SASL/OAUTHBEARER initial client response. These values are + // ignored by the SASL server if they are unexpected. This feature is only + // supported by Kafka >= 2.1.0. + Extensions map[string]string +} + +// AccessTokenProvider is the interface that encapsulates how implementors +// can generate access tokens for Kafka broker authentication. +type AccessTokenProvider interface { + // Token returns an access token. The implementation should ensure token + // reuse so that multiple calls at connect time do not create multiple + // tokens. The implementation should also periodically refresh the token in + // order to guarantee that each call returns an unexpired token. This + // method should not block indefinitely--a timeout error should be returned + // after a short period of inactivity so that the broker connection logic + // can log debugging information and retry. + Token() (*AccessToken, error) +} + +// SCRAMClient is a an interface to a SCRAM +// client implementation. 
+type SCRAMClient interface { + // Begin prepares the client for the SCRAM exchange + // with the server with a user name and a password + Begin(userName, password, authzID string) error + // Step steps client through the SCRAM exchange. It is + // called repeatedly until it errors or `Done` returns true. + Step(challenge string) (response string, err error) + // Done should return true when the SCRAM conversation + // is over. + Done() bool } type responsePromise struct { requestTime time.Time correlationID int32 + headerVersion int16 packets chan []byte errors chan error } @@ -83,25 +154,19 @@ func (b *Broker) Open(conf *Config) error { go withRecover(func() { defer b.lock.Unlock() - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } + dialer := conf.getDialer() + b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) b.conn = nil atomic.StoreInt32(&b.opened, 0) return } - b.conn = newBufConn(b.conn) + if conf.Net.TLS.Enable { + b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config)) + } + b.conn = newBufConn(b.conn) b.conf = conf // Create or reuse the global metrics shared between brokers @@ -112,20 +177,16 @@ func (b *Broker) Open(conf *Config) error { b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and 
are already exposed through the global metrics above if b.id >= 0 { - b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) - b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) - b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) - b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) - b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) - b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) - b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) + b.registerMetrics() } if conf.Net.SASL.Enable { - b.connErr = b.sendAndReceiveSASLPlainAuth() + b.connErr = b.authenticateViaSASL() + if b.connErr != nil { err = b.conn.Close() if err == nil { @@ -162,6 +223,7 @@ func (b *Broker) Connected() (bool, error) { return b.conn != nil, b.connErr } +//Close closes the broker resources func (b *Broker) Close() error { b.lock.Lock() defer b.lock.Unlock() @@ -180,12 +242,7 @@ func (b *Broker) Close() error { b.done = nil b.responses = nil - if b.id >= 0 { - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b)) - b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b)) - } + b.unregisterMetrics() if err == nil { Logger.Printf("Closed connection to broker %s\n", b.addr) @@ -208,6 +265,18 @@ func (b *Broker) Addr() string { return b.addr } +// Rack returns the broker's rack as retrieved from Kafka's metadata or the +// empty string if it is not known. The returned value corresponds to the +// broker's broker.rack configuration setting. Requires protocol version to be +// at least v0.10.0.0. 
+func (b *Broker) Rack() string { + if b.rack == nil { + return "" + } + return *b.rack +} + +//GetMetadata send a metadata request and returns a metadata response or error func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { response := new(MetadataResponse) @@ -220,6 +289,7 @@ func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error return response, nil } +//GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { response := new(ConsumerMetadataResponse) @@ -232,6 +302,7 @@ func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*Consume return response, nil } +//FindCoordinator sends a find coordinate request and returns a response or error func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { response := new(FindCoordinatorResponse) @@ -244,6 +315,7 @@ func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordina return response, nil } +//GetAvailableOffsets return an offset response or error func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { response := new(OffsetResponse) @@ -256,9 +328,12 @@ func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, e return response, nil } +//Produce returns a produce response or error func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { - var response *ProduceResponse - var err error + var ( + response *ProduceResponse + err error + ) if request.RequiredAcks == NoResponse { err = b.sendAndReceive(request, nil) @@ -274,11 +349,11 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { return response, nil } +//Fetch returns a FetchResponse or error func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { response := 
new(FetchResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -286,11 +361,11 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } +//CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -298,11 +373,11 @@ func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitRespon return response, nil } +//FetchOffset returns an offset fetch response or error func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { response := new(OffsetFetchResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -310,6 +385,7 @@ func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, return response, nil } +//JoinGroup returns a join group response or error func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { response := new(JoinGroupResponse) @@ -321,6 +397,7 @@ func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error return response, nil } +//SyncGroup returns a sync group response or error func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { response := new(SyncGroupResponse) @@ -332,6 +409,7 @@ func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error return response, nil } +//LeaveGroup return a leave group response or error func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { response := new(LeaveGroupResponse) @@ -343,6 +421,7 @@ func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, er return response, nil } +//Heartbeat returns a heartbeat response or error func (b *Broker) Heartbeat(request *HeartbeatRequest) 
(*HeartbeatResponse, error) { response := new(HeartbeatResponse) @@ -354,6 +433,7 @@ func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error return response, nil } +//ListGroups return a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) @@ -365,6 +445,7 @@ func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, er return response, nil } +//DescribeGroups return describe group response or error func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { response := new(DescribeGroupsResponse) @@ -376,6 +457,7 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups return response, nil } +//ApiVersions return api version response or error func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { response := new(ApiVersionsResponse) @@ -387,6 +469,7 @@ func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, return response, nil } +//CreateTopics send a create topic request and returns create topic response func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { response := new(CreateTopicsResponse) @@ -398,6 +481,7 @@ func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsRespon return response, nil } +//DeleteTopics sends a delete topic request and returns delete topic response func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { response := new(DeleteTopicsResponse) @@ -409,6 +493,8 @@ func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsRespon return response, nil } +//CreatePartitions sends a create partition request and returns create +//partitions response or error func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) { response := 
new(CreatePartitionsResponse) @@ -420,6 +506,34 @@ func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePart return response, nil } +//AlterPartitionReassignments sends a alter partition reassignments request and +//returns alter partition reassignments response +func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) { + response := new(AlterPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//ListPartitionReassignments sends a list partition reassignments request and +//returns list partition reassignments response +func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) { + response := new(ListPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DeleteRecords send a request to delete records and return delete record +//response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { response := new(DeleteRecordsResponse) @@ -431,6 +545,7 @@ func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsRes return response, nil } +//DescribeAcls sends a describe acl request and returns a response or error func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { response := new(DescribeAclsResponse) @@ -442,6 +557,7 @@ func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsRespon return response, nil } +//CreateAcls sends a create acl request and returns a response or error func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { response := new(CreateAclsResponse) @@ -453,6 +569,7 @@ func (b *Broker) CreateAcls(request *CreateAclsRequest) 
(*CreateAclsResponse, er return response, nil } +//DeleteAcls sends a delete acl request and returns a response or error func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { response := new(DeleteAclsResponse) @@ -464,6 +581,7 @@ func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, er return response, nil } +//InitProducerID sends an init producer request and returns a response or error func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { response := new(InitProducerIDResponse) @@ -475,6 +593,8 @@ func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerID return response, nil } +//AddPartitionsToTxn send a request to add partition to txn and returns +//a response or error func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { response := new(AddPartitionsToTxnResponse) @@ -486,6 +606,8 @@ func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPar return response, nil } +//AddOffsetsToTxn sends a request to add offsets to txn and returns a response +//or error func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { response := new(AddOffsetsToTxnResponse) @@ -497,6 +619,7 @@ func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsTo return response, nil } +//EndTxn sends a request to end txn and returns a response or error func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { response := new(EndTxnResponse) @@ -508,6 +631,8 @@ func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { return response, nil } +//TxnOffsetCommit sends a request to commit transaction offsets and returns +//a response or error func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { response := new(TxnOffsetCommitResponse) @@ -519,6 +644,8 
@@ func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCom return response, nil } +//DescribeConfigs sends a request to describe config and returns a response or +//error func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { response := new(DescribeConfigsResponse) @@ -530,6 +657,7 @@ func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConf return response, nil } +//AlterConfigs sends a request to alter config and return a response or error func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) { response := new(AlterConfigsResponse) @@ -541,6 +669,7 @@ func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsRespon return response, nil } +//DeleteGroups sends a request to delete groups and returns a response or error func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { response := new(DeleteGroupsResponse) @@ -551,7 +680,39 @@ func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsRespon return response, nil } -func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { +//DescribeLogDirs sends a request to get the broker's log dir paths and sizes +func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) { + response := new(DescribeLogDirsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// readFull ensures the conn ReadDeadline has been setup before making a +// call to io.ReadFull +func (b *Broker) readFull(buf []byte) (n int, err error) { + if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil { + return 0, err + } + + return io.ReadFull(b.conn, buf) +} + +// write ensures the conn WriteDeadline has been setup before making a +// call to conn.Write +func (b *Broker) write(buf 
[]byte) (n int, err error) { + if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil { + return 0, err + } + + return b.conn.Write(buf) +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { b.lock.Lock() defer b.lock.Unlock() @@ -572,34 +733,36 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, return nil, err } - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - return nil, err - } - requestTime := time.Now() - bytes, err := b.conn.Write(buf) + // Will be decremented in responseReceiver (except error or request with NoResponse) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { + b.addRequestInFlightMetrics(-1) return nil, err } b.correlationID++ if !promiseResponse { // Record request latency without the response - b.updateRequestLatencyMetrics(time.Since(requestTime)) + b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) return nil, nil } - promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} + promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} b.responses <- promise return &promise, nil } -func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { - promise, err := b.send(req, res != nil) +func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + responseHeaderVersion := int16(-1) + if res != nil { + responseHeaderVersion = res.headerVersion() + } + promise, err := b.send(req, res != nil, responseHeaderVersion) if err != nil { return err } @@ -648,11 +811,11 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { } func (b *Broker) encode(pe packetEncoder, version int16) (err error) { - host, portstr, err := net.SplitHostPort(b.addr) if 
err != nil { return err } + port, err := strconv.Atoi(portstr) if err != nil { return err @@ -679,21 +842,20 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { func (b *Broker) responseReceiver() { var dead error - header := make([]byte, 8) + for response := range b.responses { if dead != nil { + // This was previously incremented in send() and + // we are not calling updateIncomingCommunicationMetrics() + b.addRequestInFlightMetrics(-1) response.errors <- dead continue } - err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) - if err != nil { - dead = err - response.errors <- err - continue - } + var headerLength = getHeaderLength(response.headerVersion) + header := make([]byte, headerLength) - bytesReadHeader, err := io.ReadFull(b.conn, header) + bytesReadHeader, err := b.readFull(header) requestLatency := time.Since(response.requestTime) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) @@ -703,7 +865,7 @@ func (b *Broker) responseReceiver() { } decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) + err = versionedDecode(header, &decodedHeader, response.headerVersion) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err @@ -719,8 +881,8 @@ func (b *Broker) responseReceiver() { continue } - buf := make([]byte, decodedHeader.length-4) - bytesReadBody, err := io.ReadFull(b.conn, buf) + buf := make([]byte, decodedHeader.length-int32(headerLength)+4) + bytesReadBody, err := b.readFull(buf) b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err @@ -733,58 +895,96 @@ func (b *Broker) responseReceiver() { close(b.done) } -func (b *Broker) sendAndReceiveSASLPlainHandshake() error { - rb := &SaslHandshakeRequest{"PLAIN"} - req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - 
return err +func getHeaderLength(headerVersion int16) int8 { + if headerVersion < 1 { + return 8 + } else { + // header contains additional tagged field length (0), we don't support actual tags yet. + return 9 + } +} + +func (b *Broker) authenticateViaSASL() error { + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1() + case SASLTypeGSSAPI: + return b.sendAndReceiveKerberos() + default: + return b.sendAndReceiveSASLPlainAuth() } +} - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) +func (b *Broker) sendAndReceiveKerberos() error { + b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI + if b.kerberosAuthenticator.NewKerberosClientFunc == nil { + b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient + } + return b.kerberosAuthenticator.Authorize(b) +} + +func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error { + rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) if err != nil { return err } requestTime := time.Now() - bytes, err := b.conn.Write(buf) + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) return err } b.correlationID++ - //wait for the response + header := make([]byte, 8) // response header - _, err = io.ReadFull(b.conn, header) + _, err = b.readFull(header) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) return err } + length := 
binary.BigEndian.Uint32(header[:4]) payload := make([]byte, length-4) - n, err := io.ReadFull(b.conn, payload) + n, err := b.readFull(payload) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) return err } + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} + err = versionedDecode(payload, res, 0) if err != nil { Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) return err } + if res.Err != ErrNoError { Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } - Logger.Print("Successful SASL handshake") + + Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } -// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) -// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 +// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). +// Kafka 1.x.x onward added a SaslAuthenticate request/response message which +// wraps the SASL flow in the Kafka protocol, which allows for returning +// meaningful errors on authentication failure. // // In SASL Plain, Kafka expects the auth header to be in the following format // Message format (from https://tools.ietf.org/html/rfc4616): @@ -798,38 +998,50 @@ func (b *Broker) sendAndReceiveSASLPlainHandshake() error { // SAFE = UTF1 / UTF2 / UTF3 / UTF4 // ;; any UTF-8 encoded Unicode character except NUL // +// With SASL v0 handshake and auth then: // When credentials are valid, Kafka returns a 4 byte array of null characters. -// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way -// of responding to bad credentials but thats how its being done today. +// When credentials are invalid, Kafka closes the connection. 
+// +// With SASL v1 handshake and auth then: +// When credentials are invalid, Kafka replies with a SaslAuthenticate response +// containing an error code and message detailing the authentication failure. func (b *Broker) sendAndReceiveSASLPlainAuth() error { + // default to V0 to allow for backward compatibility when SASL is enabled + // but not the handshake if b.conf.Net.SASL.Handshake { - handshakeErr := b.sendAndReceiveSASLPlainHandshake() + handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } } - length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) - authBytes := make([]byte, length+4) //4 byte length header + auth data - binary.BigEndian.PutUint32(authBytes, uint32(length)) - copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) - err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error()) - return err + if b.conf.Net.SASL.Version == SASLHandshakeV1 { + return b.sendAndReceiveV1SASLPlainAuth() } + return b.sendAndReceiveV0SASLPlainAuth() +} + +// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol +func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { + length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password) requestTime := time.Now() - bytesWritten, err := b.conn.Write(authBytes) + // Will be decremented in updateIncomingCommunicationMetrics (except error) + 
b.addRequestInFlightMetrics(1) + bytesWritten, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } header := make([]byte, 4) - n, err := io.ReadFull(b.conn, header) + n, err := b.readFull(header) b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) // If the credentials are valid, we would get a 4 byte response filled with null characters. // Otherwise, the broker closes the connection and we get an EOF @@ -842,29 +1054,319 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { return nil } +// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol +func (b *Broker) sendAndReceiveV1SASLPlainAuth() error { + correlationID := b.correlationID + + requestTime := time.Now() + + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + + bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID) + b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime)) + + // With v1 sasl we get an error message set in the response we can return + if err != nil { + Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error()) + return err + } + + return nil +} + +// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 +// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 +func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { + if err := 
b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil { + return err + } + + token, err := provider.Token() + if err != nil { + return err + } + + message, err := buildClientFirstMessage(token) + if err != nil { + return err + } + + challenged, err := b.sendClientMessage(message) + if err != nil { + return err + } + + if challenged { + // Abort the token exchange. The broker returns the failure code. + _, err = b.sendClientMessage([]byte(`\x01`)) + } + + return err +} + +// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true +// if the broker responds with a challenge, in which case the token is +// rejected. +func (b *Broker) sendClientMessage(message []byte) (bool, error) { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + + bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + return false, err + } + + b.correlationID++ + + res := &SaslAuthenticateResponse{} + bytesRead, err := b.receiveSASLServerResponse(res, correlationID) + + requestLatency := time.Since(requestTime) + b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + + isChallenge := len(res.SaslAuthBytes) > 0 + + if isChallenge && err != nil { + Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes) + } + + return isChallenge, err +} + +func (b *Broker) sendAndReceiveSASLSCRAMv1() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { + return err + } + + scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + } 
+ + msg, err := scramClient.Step("") + if err != nil { + return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + } + + for !scramClient.Done() { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) + msg, err = scramClient.Step(string(challenge)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } + } + + Logger.Println("SASL authentication succeeded") + return nil +} + +func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { + rb := &SaslAuthenticateRequest{msg} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + _, err := b.readFull(buf) + if err != nil { + return nil, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return nil, err + } + + if header.correlationID != correlationID { + return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", 
b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + _, err = b.readFull(buf) + if err != nil { + return nil, err + } + + res := &SaslAuthenticateResponse{} + if err := versionedDecode(buf, res, 0); err != nil { + return nil, err + } + if res.Err != ErrNoError { + return nil, res.Err + } + return res.SaslAuthBytes, nil +} + +// Build SASL/OAUTHBEARER initial client response as described by RFC-7628 +// https://tools.ietf.org/html/rfc7628 +func buildClientFirstMessage(token *AccessToken) ([]byte, error) { + var ext string + + if token.Extensions != nil && len(token.Extensions) > 0 { + if _, ok := token.Extensions[SASLExtKeyAuth]; ok { + return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth) + } + ext = "\x01" + mapToString(token.Extensions, "=", "\x01") + } + + resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)) + + return resp, nil +} + +// mapToString returns a list of key-value pairs ordered by key. +// keyValSep separates the key from the value. elemSep separates each pair. 
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string { + buf := make([]string, 0, len(extensions)) + + for k, v := range extensions { + buf = append(buf, k+keyValSep+v) + } + + sort.Strings(buf) + + return strings.Join(buf, elemSep) +} + +func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + rb := &SaslAuthenticateRequest{authBytes} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { + rb := &SaslAuthenticateRequest{initialResp} + + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + bytesRead, err := b.readFull(buf) + if err != nil { + return bytesRead, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return bytesRead, err + } + + if header.correlationID != correlationID { + return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + c, err := b.readFull(buf) + bytesRead += c + if err != nil { + return bytesRead, err + } + + if err := versionedDecode(buf, res, 0); err != nil { + return bytesRead, err + } + + if res.Err != ErrNoError { + return bytesRead, res.Err + } + + return bytesRead, nil +} + func (b *Broker) 
updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { - b.updateRequestLatencyMetrics(requestLatency) + b.updateRequestLatencyAndInFlightMetrics(requestLatency) b.responseRate.Mark(1) + if b.brokerResponseRate != nil { b.brokerResponseRate.Mark(1) } + responseSize := int64(bytes) b.incomingByteRate.Mark(responseSize) if b.brokerIncomingByteRate != nil { b.brokerIncomingByteRate.Mark(responseSize) } + b.responseSize.Update(responseSize) if b.brokerResponseSize != nil { b.brokerResponseSize.Update(responseSize) } } -func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { +func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) { requestLatencyInMs := int64(requestLatency / time.Millisecond) b.requestLatency.Update(requestLatencyInMs) + if b.brokerRequestLatency != nil { b.brokerRequestLatency.Update(requestLatencyInMs) } + + b.addRequestInFlightMetrics(-1) +} + +func (b *Broker) addRequestInFlightMetrics(i int64) { + b.requestsInFlight.Inc(i) + if b.brokerRequestsInFlight != nil { + b.brokerRequestsInFlight.Inc(i) + } } func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { @@ -872,13 +1374,68 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { if b.brokerRequestRate != nil { b.brokerRequestRate.Mark(1) } + requestSize := int64(bytes) b.outgoingByteRate.Mark(requestSize) if b.brokerOutgoingByteRate != nil { b.brokerOutgoingByteRate.Mark(requestSize) } + b.requestSize.Update(requestSize) if b.brokerRequestSize != nil { b.brokerRequestSize.Update(requestSize) } } + +func (b *Broker) registerMetrics() { + b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") + b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerRequestSize = b.registerHistogram("request-size") + b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") + b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") + b.brokerResponseRate = 
b.registerMeter("response-rate") + b.brokerResponseSize = b.registerHistogram("response-size") + b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") +} + +func (b *Broker) unregisterMetrics() { + for _, name := range b.registeredMetrics { + b.conf.MetricRegistry.Unregister(name) + } + b.registeredMetrics = nil +} + +func (b *Broker) registerMeter(name string) metrics.Meter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerHistogram(name string) metrics.Histogram { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerCounter(name string) metrics.Counter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) +} + +func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { + if cfg == nil { + cfg = &tls.Config{} + } + if cfg.ServerName != "" { + return cfg + } + + c := cfg.Clone() + sn, _, err := net.SplitHostPort(addr) + if err != nil { + Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err)) + } + c.ServerName = sn + return c +} diff --git a/vendor/github.com/Shopify/sarama/broker_test.go b/vendor/github.com/Shopify/sarama/broker_test.go deleted file mode 100644 index 9263cef8..00000000 --- a/vendor/github.com/Shopify/sarama/broker_test.go +++ /dev/null @@ -1,358 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" - "time" -) - -func ExampleBroker() { - broker := NewBroker("localhost:9092") - err := broker.Open(nil) - if err != nil { - panic(err) - } - - request := MetadataRequest{Topics: []string{"myTopic"}} - response, err := 
broker.GetMetadata(&request) - if err != nil { - _ = broker.Close() - panic(err) - } - - fmt.Println("There are", len(response.Topics), "topics active in the cluster.") - - if err = broker.Close(); err != nil { - panic(err) - } -} - -type mockEncoder struct { - bytes []byte -} - -func (m mockEncoder) encode(pe packetEncoder) error { - return pe.putRawBytes(m.bytes) -} - -type brokerMetrics struct { - bytesRead int - bytesWritten int -} - -func TestBrokerAccessors(t *testing.T) { - broker := NewBroker("abc:123") - - if broker.ID() != -1 { - t.Error("New broker didn't have an ID of -1.") - } - - if broker.Addr() != "abc:123" { - t.Error("New broker didn't have the correct address") - } - - broker.id = 34 - if broker.ID() != 34 { - t.Error("Manually setting broker ID did not take effect.") - } -} - -func TestSimpleBrokerCommunication(t *testing.T) { - for _, tt := range brokerTestTable { - Logger.Printf("Testing broker communication for %s", tt.name) - mb := NewMockBroker(t, 0) - mb.Returns(&mockEncoder{tt.response}) - pendingNotify := make(chan brokerMetrics) - // Register a callback to be notified about successful requests - mb.SetNotifier(func(bytesRead, bytesWritten int) { - pendingNotify <- brokerMetrics{bytesRead, bytesWritten} - }) - broker := NewBroker(mb.Addr()) - // Set the broker id in order to validate local broker metrics - broker.id = 0 - conf := NewConfig() - conf.Version = tt.version - err := broker.Open(conf) - if err != nil { - t.Fatal(err) - } - tt.runner(t, broker) - // Wait up to 500 ms for the remote broker to process the request and - // notify us about the metrics - timeout := 500 * time.Millisecond - select { - case mockBrokerMetrics := <-pendingNotify: - validateBrokerMetrics(t, broker, mockBrokerMetrics) - case <-time.After(timeout): - t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout) - } - mb.Close() - err = broker.Close() - if err != nil { - t.Error(err) - } - } - -} - -// We're not testing encoding/decoding 
here, so most of the requests/responses will be empty for simplicity's sake -var brokerTestTable = []struct { - version KafkaVersion - name string - response []byte - runner func(*testing.T, *Broker) -}{ - {V0_10_0_0, - "MetadataRequest", - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := MetadataRequest{} - response, err := broker.GetMetadata(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Metadata request got no response!") - } - }}, - - {V0_10_0_0, - "ConsumerMetadataRequest", - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ConsumerMetadataRequest{} - response, err := broker.GetConsumerMetadata(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Consumer Metadata request got no response!") - } - }}, - - {V0_10_0_0, - "ProduceRequest (NoResponse)", - []byte{}, - func(t *testing.T, broker *Broker) { - request := ProduceRequest{} - request.RequiredAcks = NoResponse - response, err := broker.Produce(&request) - if err != nil { - t.Error(err) - } - if response != nil { - t.Error("Produce request with NoResponse got a response!") - } - }}, - - {V0_10_0_0, - "ProduceRequest (WaitForLocal)", - []byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ProduceRequest{} - request.RequiredAcks = WaitForLocal - response, err := broker.Produce(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Produce request without NoResponse got no response!") - } - }}, - - {V0_10_0_0, - "FetchRequest", - []byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := FetchRequest{} - response, err := broker.Fetch(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Fetch request got no response!") - } - }}, - - {V0_10_0_0, - "OffsetFetchRequest", - []byte{0x00, 0x00, 0x00, 
0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetFetchRequest{} - response, err := broker.FetchOffset(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("OffsetFetch request got no response!") - } - }}, - - {V0_10_0_0, - "OffsetCommitRequest", - []byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetCommitRequest{} - response, err := broker.CommitOffset(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("OffsetCommit request got no response!") - } - }}, - - {V0_10_0_0, - "OffsetRequest", - []byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetRequest{} - response, err := broker.GetAvailableOffsets(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Offset request got no response!") - } - }}, - - {V0_10_0_0, - "JoinGroupRequest", - []byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := JoinGroupRequest{} - response, err := broker.JoinGroup(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("JoinGroup request got no response!") - } - }}, - - {V0_10_0_0, - "SyncGroupRequest", - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := SyncGroupRequest{} - response, err := broker.SyncGroup(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("SyncGroup request got no response!") - } - }}, - - {V0_10_0_0, - "LeaveGroupRequest", - []byte{0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := LeaveGroupRequest{} - response, err := broker.LeaveGroup(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("LeaveGroup request got no response!") - } - }}, - - {V0_10_0_0, - "HeartbeatRequest", - []byte{0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := 
HeartbeatRequest{} - response, err := broker.Heartbeat(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Heartbeat request got no response!") - } - }}, - - {V0_10_0_0, - "ListGroupsRequest", - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ListGroupsRequest{} - response, err := broker.ListGroups(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("ListGroups request got no response!") - } - }}, - - {V0_10_0_0, - "DescribeGroupsRequest", - []byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := DescribeGroupsRequest{} - response, err := broker.DescribeGroups(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("DescribeGroups request got no response!") - } - }}, - - {V0_10_0_0, - "ApiVersionsRequest", - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ApiVersionsRequest{} - response, err := broker.ApiVersions(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("ApiVersions request got no response!") - } - }}, - - {V1_1_0_0, - "DeleteGroupsRequest", - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := DeleteGroupsRequest{} - response, err := broker.DeleteGroups(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("DeleteGroups request got no response!") - } - }}, -} - -func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) { - metricValidators := newMetricValidators() - mockBrokerBytesRead := mockBrokerMetrics.bytesRead - mockBrokerBytesWritten := mockBrokerMetrics.bytesWritten - - // Check that the number of bytes sent corresponds to what the mock broker received - metricValidators.registerForAllBrokers(broker, countMeterValidator("incoming-byte-rate", mockBrokerBytesWritten)) - if mockBrokerBytesWritten == 0 { - // 
This a ProduceRequest with NoResponse - metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 0)) - metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 0)) - metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", 0, 0)) - } else { - metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 1)) - metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 1)) - metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", mockBrokerBytesWritten, mockBrokerBytesWritten)) - } - - // Check that the number of bytes received corresponds to what the mock broker sent - metricValidators.registerForAllBrokers(broker, countMeterValidator("outgoing-byte-rate", mockBrokerBytesRead)) - metricValidators.registerForAllBrokers(broker, countMeterValidator("request-rate", 1)) - metricValidators.registerForAllBrokers(broker, countHistogramValidator("request-size", 1)) - metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("request-size", mockBrokerBytesRead, mockBrokerBytesRead)) - - // Run the validators - metricValidators.run(t, broker.conf.MetricRegistry) -} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index ad805346..6127ed78 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -17,12 +17,21 @@ type Client interface { // altered after it has been created. Config() *Config - // Controller returns the cluster controller broker. Requires Kafka 0.10 or higher. + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. You can call RefreshController + // to update the cached value. Requires Kafka 0.10 or higher. 
Controller() (*Broker, error) + // RefreshController retrieves the cluster controller from fresh metadata + // and stores it in the local cache. Requires Kafka 0.10 or higher. + RefreshController() (*Broker, error) + // Brokers returns the current set of active brokers as retrieved from cluster metadata. Brokers() []*Broker + // Broker returns the active Broker if available for the broker ID. + Broker(brokerID int32) (*Broker, error) + // Topics returns the set of available topics as retrieved from cluster metadata. Topics() ([]string, error) @@ -46,6 +55,15 @@ type Client interface { // the partition leader. InSyncReplicas(topic string, partitionID int32) ([]int32, error) + // OfflineReplicas returns the set of all offline replica IDs for the given + // partition. Offline replicas are replicas which are offline + OfflineReplicas(topic string, partitionID int32) ([]int32, error) + + // RefreshBrokers takes a list of addresses to be used as seed brokers. + // Existing broker connections are closed and the updated list of seed brokers + // will be used for the next metadata fetch. + RefreshBrokers(addrs []string) error + // RefreshMetadata takes a list of topics and queries the cluster to refresh the // available metadata for those topics. If no topics are provided, it will refresh // metadata for all topics. @@ -67,6 +85,9 @@ type Client interface { // in local cache. This function only works on Kafka 0.8.2 and higher. RefreshCoordinator(consumerGroup string) error + // InitProducerID retrieves information required for Idempotent Producer + InitProducerID() (*InitProducerIDResponse, error) + // Close shuts down all broker connections managed by this client. It is required // to call this function before a client object passes out of scope, as it will // otherwise leak memory. 
You must close any Producers or Consumers using a client @@ -142,10 +163,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { coordinators: make(map[string]int32), } - random := rand.New(rand.NewSource(time.Now().UnixNano())) - for _, index := range random.Perm(len(addrs)) { - client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) - } + client.randomizeSeedBrokers(addrs) if conf.Metadata.Full { // do an initial fetch of all cluster metadata by specifying an empty list of topics @@ -176,13 +194,43 @@ func (client *client) Config() *Config { func (client *client) Brokers() []*Broker { client.lock.RLock() defer client.lock.RUnlock() - brokers := make([]*Broker, 0) + brokers := make([]*Broker, 0, len(client.brokers)) for _, broker := range client.brokers { brokers = append(brokers, broker) } return brokers } +func (client *client) Broker(brokerID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + broker, ok := client.brokers[brokerID] + if !ok { + return nil, ErrBrokerNotFound + } + _ = broker.Open(client.conf) + return broker, nil +} + +func (client *client) InitProducerID() (*InitProducerIDResponse, error) { + var err error + for broker := client.any(); broker != nil; broker = client.any() { + req := &InitProducerIDRequest{} + + response, err := broker.InitProducerID(req) + switch err.(type) { + case nil: + return response, nil + default: + // some error, remove that broker and try again + Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + return nil, err +} + func (client *client) Close() error { if client.Closed() { // Chances are this is being called from a defer() and the error will go unobserved @@ -215,6 +263,9 @@ func (client *client) Close() error { } func (client *client) Closed() bool { + client.lock.RLock() + defer client.lock.RUnlock() + return client.brokers == nil } @@ -265,7 
+316,8 @@ func (client *client) Partitions(topic string) ([]int32, error) { partitions = client.cachedPartitions(topic, allPartitions) } - if partitions == nil { + // no partitions found after refresh metadata + if len(partitions) == 0 { return nil, ErrUnknownTopicOrPartition } @@ -350,6 +402,31 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, return dupInt32Slice(metadata.Isr), nil } +func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.OfflineReplicas), metadata.Err + } + return dupInt32Slice(metadata.OfflineReplicas), nil +} + func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient @@ -368,6 +445,27 @@ func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { return leader, err } +func (client *client) RefreshBrokers(addrs []string) error { + if client.Closed() { + return ErrClosedClient + } + + client.lock.Lock() + defer client.lock.Unlock() + + for _, broker := range client.brokers { + _ = broker.Close() + delete(client.brokers, broker.ID()) + } + + client.seedBrokers = nil + client.deadSeeds = nil + + client.randomizeSeedBrokers(addrs) + + return nil +} + func (client *client) RefreshMetadata(topics ...string) error { if client.Closed() { return ErrClosedClient @@ -382,7 +480,11 @@ func (client *client) RefreshMetadata(topics ...string) error { } } - return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) + deadline := time.Time{} + if 
client.conf.Metadata.Timeout > 0 { + deadline = time.Now().Add(client.conf.Metadata.Timeout) + } + return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline) } func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { @@ -427,6 +529,35 @@ func (client *client) Controller() (*Broker, error) { return controller, nil } +// deregisterController removes the cached controllerID +func (client *client) deregisterController() { + client.lock.Lock() + defer client.lock.Unlock() + delete(client.brokers, client.controllerID) +} + +// RefreshController retrieves the cluster controller from fresh metadata +// and stores it in the local cache. Requires Kafka 0.10 or higher. +func (client *client) RefreshController() (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.deregisterController() + + if err := client.refreshMetadata(); err != nil { + return nil, err + } + + controller := client.cachedController() + if controller == nil { + return nil, ErrControllerNotAvailable + } + + _ = controller.Open(client.conf) + return controller, nil +} + func (client *client) Coordinator(consumerGroup string) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient @@ -468,10 +599,46 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { // private broker management helpers +func (client *client) randomizeSeedBrokers(addrs []string) { + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } +} + +func (client *client) updateBroker(brokers []*Broker) { + var currentBroker = make(map[int32]*Broker, len(brokers)) + + for _, broker := range brokers { + currentBroker[broker.ID()] = broker + if client.brokers[broker.ID()] == nil { // add new broker + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at 
%s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } + } + + for id, broker := range client.brokers { + if _, exist := currentBroker[id]; !exist { // remove old broker + safeAsyncClose(broker) + delete(client.brokers, id) + Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr()) + } + } +} + // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered // in the brokers map. It returns the broker that is registered, which may be the provided broker, // or a previously registered Broker instance. You must hold the write lock before calling this function. func (client *client) registerBroker(broker *Broker) { + if client.brokers == nil { + Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr()) + return + } + if client.brokers[broker.ID()] == nil { client.brokers[broker.ID()] = broker Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) @@ -665,7 +832,7 @@ func (client *client) backgroundMetadataUpdater() { } func (client *client) refreshMetadata() error { - topics := []string{} + var topics []string if !client.conf.Metadata.Full { if specificTopics, err := client.MetadataTopics(); err != nil { @@ -684,29 +851,47 @@ func (client *client) refreshMetadata() error { return nil } -func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error { + pastDeadline := func(backoff time.Duration) bool { + if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) { + // we are past the deadline + return true + } + return 
false + } retry := func(err error) error { if attemptsRemaining > 0 { - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.tryRefreshMetadata(topics, attemptsRemaining-1) + backoff := client.computeBackoff(attemptsRemaining) + if pastDeadline(backoff) { + Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") + return err + } + Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + if backoff > 0 { + time.Sleep(backoff) + } + return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) } return err } - for broker := client.any(); broker != nil; broker = client.any() { + broker := client.any() + for ; broker != nil && !pastDeadline(0); broker = client.any() { + allowAutoTopicCreation := true if len(topics) > 0 { Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) } else { + allowAutoTopicCreation = false Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) } - req := &MetadataRequest{Topics: topics} - if client.conf.Version.IsAtLeast(V0_10_0_0) { + req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation} + if client.conf.Version.IsAtLeast(V1_0_0_0) { + req.Version = 5 + } else if client.conf.Version.IsAtLeast(V0_10_0_0) { req.Version = 1 } response, err := broker.GetMetadata(req) - switch err.(type) { case nil: allKnownMetaData := len(topics) == 0 @@ -721,14 +906,36 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) case PacketEncodingError: // didn't even send, return the error return err + + case KError: + // if SASL auth error return as this _should_ be a non retryable err for all brokers + if err.(KError) == 
ErrSASLAuthenticationFailed { + Logger.Println("client/metadata failed SASL authentication") + return err + } + + if err.(KError) == ErrTopicAuthorizationFailed { + Logger.Println("client is not authorized to access this topic. The topics were: ", topics) + return err + } + // else remove that broker and try again + Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + default: // some other error, remove that broker and try again - Logger.Println("client/metadata got error from broker while fetching metadata:", err) + Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) _ = broker.Close() client.deregisterBroker(broker) } } + if broker != nil { + Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) + return retry(ErrOutOfBrokers) + } + Logger.Println("client/metadata no available broker to send metadata request to") client.resurrectDeadBrokers() return retry(ErrOutOfBrokers) @@ -736,16 +943,19 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) { + if client.Closed() { + return + } + client.lock.Lock() defer client.lock.Unlock() // For all the brokers we received: // - if it is a new ID, save it // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - if some brokers is not exist in it, remove old broker // - otherwise ignore it, replacing our existing one would just bounce the connection - for _, broker := range data.Brokers { - client.registerBroker(broker) - } + client.updateBroker(data.Brokers) client.controllerID = data.ControllerID @@ -766,7 +976,7 @@ 
func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo switch topic.Err { case ErrNoError: - break + // no-op case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results err = topic.Err continue @@ -776,7 +986,6 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo continue case ErrLeaderNotAvailable: // retry, but store partial partition results retry = true - break default: // don't retry, don't store partial results Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) err = topic.Err @@ -816,11 +1025,21 @@ func (client *client) cachedController() *Broker { return client.brokers[client.controllerID] } +func (client *client) computeBackoff(attemptsRemaining int) time.Duration { + if client.conf.Metadata.Retry.BackoffFunc != nil { + maxRetries := client.conf.Metadata.Retry.Max + retries := maxRetries - attemptsRemaining + return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries) + } + return client.conf.Metadata.Retry.Backoff +} + func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { - Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) + backoff := client.computeBackoff(attemptsRemaining) + Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + time.Sleep(backoff) return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) } return nil, err @@ -865,6 +1084,10 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin } return retry(ErrConsumerCoordinatorNotAvailable) + case ErrGroupAuthorizationFailed: + Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup) + return retry(ErrGroupAuthorizationFailed) + default: return nil, response.Err } @@ -874,3 +1097,18 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin client.resurrectDeadBrokers() return retry(ErrOutOfBrokers) } + +// nopCloserClient embeds an existing Client, but disables +// the Close method (yet all other methods pass +// through unchanged). This is for use in larger structs +// where it is undesirable to close the client that was +// passed in by the caller. +type nopCloserClient struct { + Client +} + +// Close intercepts and purposely does not call the underlying +// client's Close() method. 
+func (ncc *nopCloserClient) Close() error { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/client_test.go b/vendor/github.com/Shopify/sarama/client_test.go deleted file mode 100644 index 1d0924d0..00000000 --- a/vendor/github.com/Shopify/sarama/client_test.go +++ /dev/null @@ -1,661 +0,0 @@ -package sarama - -import ( - "io" - "sync" - "testing" - "time" -) - -func safeClose(t testing.TB, c io.Closer) { - err := c.Close() - if err != nil { - t.Error(err) - } -} - -func TestSimpleClient(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - seedBroker.Returns(new(MetadataResponse)) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestCachedPartitions(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - replicas := []int32{3, 1, 5} - isr := []int32{5, 1} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker("localhost:12345", 2) - metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - c, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - client := c.(*client) - - // Verify they aren't cached the same - allP := client.cachedPartitionsResults["my_topic"][allPartitions] - writeP := client.cachedPartitionsResults["my_topic"][writablePartitions] - if len(allP) == len(writeP) { - t.Fatal("Invalid lengths!") - } - - tmp := client.cachedPartitionsResults["my_topic"] - // Verify we actually use the cache at all! 
- tmp[allPartitions] = []int32{1, 2, 3, 4} - client.cachedPartitionsResults["my_topic"] = tmp - if 4 != len(client.cachedPartitions("my_topic", allPartitions)) { - t.Fatal("Not using the cache!") - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - replicas := []int32{seedBroker.BrokerID()} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse) - - partitions, err := client.Partitions("unknown") - - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - if partitions != nil { - t.Errorf("Should return nil as partition list, found %v", partitions) - } - - // Should still use the cache of a known topic - partitions, err = client.Partitions("my_topic") - if err != nil { - t.Errorf("Expected no error, found %v", err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse) - - // Should not use cache for unknown topic - partitions, err = client.Partitions("unknown") - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - if partitions != nil { - t.Errorf("Should return nil as partition list, found %v", partitions) - } - - seedBroker.Close() - safeClose(t, client) -} 
- -func TestClientSeedBrokers(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker("localhost:12345", 2) - seedBroker.Returns(metadataResponse) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientMetadata(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 5) - - replicas := []int32{3, 1, 5} - isr := []int32{5, 1} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - topics, err := client.Topics() - if err != nil { - t.Error(err) - } else if len(topics) != 1 || topics[0] != "my_topic" { - t.Error("Client returned incorrect topics:", topics) - } - - parts, err := client.Partitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { - t.Error("Client returned incorrect partitions for my_topic:", parts) - } - - parts, err = client.WritablePartitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 1 || parts[0] != 0 { - t.Error("Client returned incorrect writable partitions for my_topic:", parts) - } - - tst, err := client.Leader("my_topic", 0) - if err != nil { - t.Error(err) - } else if tst.ID() != 5 { - t.Error("Leader for my_topic had incorrect ID.") - } - - replicas, err = client.Replicas("my_topic", 0) - if err != nil { - t.Error(err) - } else if replicas[0] != 3 { - t.Error("Incorrect (or sorted) replica") - } else 
if replicas[1] != 1 { - t.Error("Incorrect (or sorted) replica") - } else if replicas[2] != 5 { - t.Error("Incorrect (or sorted) replica") - } - - isr, err = client.InSyncReplicas("my_topic", 0) - if err != nil { - t.Error(err) - } else if len(isr) != 2 { - t.Error("Client returned incorrect ISRs for partition:", isr) - } else if isr[0] != 5 { - t.Error("Incorrect (or sorted) ISR:", isr) - } else if isr[1] != 1 { - t.Error("Incorrect (or sorted) ISR:", isr) - } - - leader.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientGetOffset(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadata := new(MetadataResponse) - metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadata.AddBroker(leaderAddr, leader.BrokerID()) - seedBroker.Returns(metadata) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - offsetResponse := new(OffsetResponse) - offsetResponse.AddTopicPartition("foo", 0, 123) - leader.Returns(offsetResponse) - - offset, err := client.GetOffset("foo", 0, OffsetNewest) - if err != nil { - t.Error(err) - } - if offset != 123 { - t.Error("Unexpected offset, got ", offset) - } - - leader.Close() - seedBroker.Returns(metadata) - - leader = NewMockBrokerAddr(t, 2, leaderAddr) - offsetResponse = new(OffsetResponse) - offsetResponse.AddTopicPartition("foo", 0, 456) - leader.Returns(offsetResponse) - - offset, err = client.GetOffset("foo", 0, OffsetNewest) - if err != nil { - t.Error(err) - } - if offset != 456 { - t.Error("Unexpected offset, got ", offset) - } - - seedBroker.Close() - leader.Close() - safeClose(t, client) -} - -func TestClientReceivingUnknownTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - metadataResponse1 := new(MetadataResponse) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 0 - 
client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataUnknownTopic := new(MetadataResponse) - metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataUnknownTopic) - seedBroker.Returns(metadataUnknownTopic) - - if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition { - t.Error("ErrUnknownTopicOrPartition expected, got", err) - } - - // If we are asking for the leader of a partition of the non-existing topic. - // we will request metadata again. - seedBroker.Returns(metadataUnknownTopic) - seedBroker.Returns(metadataUnknownTopic) - - if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - safeClose(t, client) - seedBroker.Close() -} - -func TestClientReceivingPartialMetadata(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 5) - - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()} - - metadataPartial := new(MetadataResponse) - metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable) - metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError) - metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable) - seedBroker.Returns(metadataPartial) - - if err := client.RefreshMetadata("new_topic"); err != nil { - t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error") - } - - // Even though the metadata was incomplete, we should be able to get the leader of a partition - // for which we did get a useful 
response, without doing additional requests. - - partition0Leader, err := client.Leader("new_topic", 0) - if err != nil { - t.Error(err) - } else if partition0Leader.Addr() != leader.Addr() { - t.Error("Unexpected leader returned", partition0Leader.Addr()) - } - - // If we are asking for the leader of a partition that didn't have a leader before, - // we will do another metadata request. - - seedBroker.Returns(metadataPartial) - - // Still no leader for the partition, so asking for it should return an error. - _, err = client.Leader("new_topic", 1) - if err != ErrLeaderNotAvailable { - t.Error("Expected ErrLeaderNotAvailable, got", err) - } - - safeClose(t, client) - seedBroker.Close() - leader.Close() -} - -func TestClientRefreshBehaviour(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 5) - - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) - seedBroker.Returns(metadataResponse1) - - metadataResponse2 := new(MetadataResponse) - metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse2) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - parts, err := client.Partitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 1 || parts[0] != 0xb { - t.Error("Client returned incorrect partitions for my_topic:", parts) - } - - tst, err := client.Leader("my_topic", 0xb) - if err != nil { - t.Error(err) - } else if tst.ID() != 5 { - t.Error("Leader for my_topic had incorrect ID.") - } - - leader.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientResurrectDeadSeeds(t *testing.T) { - initialSeed := NewMockBroker(t, 0) - emptyMetadata := new(MetadataResponse) - initialSeed.Returns(emptyMetadata) - - conf := NewConfig() - conf.Metadata.Retry.Backoff = 0 - conf.Metadata.RefreshFrequency = 0 - c, err := 
NewClient([]string{initialSeed.Addr()}, conf) - if err != nil { - t.Fatal(err) - } - initialSeed.Close() - - client := c.(*client) - - seed1 := NewMockBroker(t, 1) - seed2 := NewMockBroker(t, 2) - seed3 := NewMockBroker(t, 3) - addr1 := seed1.Addr() - addr2 := seed2.Addr() - addr3 := seed3.Addr() - - // Overwrite the seed brokers with a fixed ordering to make this test deterministic. - safeClose(t, client.seedBrokers[0]) - client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)} - client.deadSeeds = []*Broker{} - - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - if err := client.RefreshMetadata(); err != nil { - t.Error(err) - } - wg.Done() - }() - seed1.Close() - seed2.Close() - - seed1 = NewMockBrokerAddr(t, 1, addr1) - seed2 = NewMockBrokerAddr(t, 2, addr2) - - seed3.Close() - - seed1.Close() - seed2.Returns(emptyMetadata) - - wg.Wait() - - if len(client.seedBrokers) != 2 { - t.Error("incorrect number of live seeds") - } - if len(client.deadSeeds) != 1 { - t.Error("incorrect number of dead seeds") - } - - safeClose(t, c) -} - -func TestClientController(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - defer seedBroker.Close() - controllerBroker := NewMockBroker(t, 2) - defer controllerBroker.Close() - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetController(controllerBroker.BrokerID()). - SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). 
- SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()), - }) - - cfg := NewConfig() - - // test kafka version greater than 0.10.0.0 - cfg.Version = V0_10_0_0 - client1, err := NewClient([]string{seedBroker.Addr()}, cfg) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, client1) - broker, err := client1.Controller() - if err != nil { - t.Fatal(err) - } - if broker.Addr() != controllerBroker.Addr() { - t.Errorf("Expected controller to have address %s, found %s", controllerBroker.Addr(), broker.Addr()) - } - - // test kafka version earlier than 0.10.0.0 - cfg.Version = V0_9_0_1 - client2, err := NewClient([]string{seedBroker.Addr()}, cfg) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, client2) - if _, err = client2.Controller(); err != ErrUnsupportedVersion { - t.Errorf("Expected Contoller() to return %s, found %s", ErrUnsupportedVersion, err) - } -} - -func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - staleCoordinator := NewMockBroker(t, 2) - freshCoordinator := NewMockBroker(t, 3) - - replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()} - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID()) - metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID()) - metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse1) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - coordinatorResponse1 := new(ConsumerMetadataResponse) - coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable - seedBroker.Returns(coordinatorResponse1) - - coordinatorResponse2 := new(ConsumerMetadataResponse) - coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID() - coordinatorResponse2.CoordinatorHost = "127.0.0.1" - 
coordinatorResponse2.CoordinatorPort = staleCoordinator.Port() - - seedBroker.Returns(coordinatorResponse2) - - broker, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if staleCoordinator.Addr() != broker.Addr() { - t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr()) - } - - if staleCoordinator.BrokerID() != broker.ID() { - t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID()) - } - - // Grab the cached value - broker2, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if broker2.Addr() != broker.Addr() { - t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr()) - } - - coordinatorResponse3 := new(ConsumerMetadataResponse) - coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID() - coordinatorResponse3.CoordinatorHost = "127.0.0.1" - coordinatorResponse3.CoordinatorPort = freshCoordinator.Port() - - seedBroker.Returns(coordinatorResponse3) - - // Refresh the locally cahced value because it's stale - if err := client.RefreshCoordinator("my_group"); err != nil { - t.Error(err) - } - - // Grab the fresh value - broker3, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if broker3.Addr() != freshCoordinator.Addr() { - t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr()) - } - - freshCoordinator.Close() - staleCoordinator.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - coordinator := NewMockBroker(t, 2) - - metadataResponse1 := new(MetadataResponse) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - 
coordinatorResponse1 := new(ConsumerMetadataResponse) - coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable - seedBroker.Returns(coordinatorResponse1) - - metadataResponse2 := new(MetadataResponse) - metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse2) - - replicas := []int32{coordinator.BrokerID()} - metadataResponse3 := new(MetadataResponse) - metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse3) - - coordinatorResponse2 := new(ConsumerMetadataResponse) - coordinatorResponse2.CoordinatorID = coordinator.BrokerID() - coordinatorResponse2.CoordinatorHost = "127.0.0.1" - coordinatorResponse2.CoordinatorPort = coordinator.Port() - - seedBroker.Returns(coordinatorResponse2) - - broker, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if coordinator.Addr() != broker.Addr() { - t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr()) - } - - if coordinator.BrokerID() != broker.ID() { - t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID()) - } - - coordinator.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientAutorefreshShutdownRace(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - seedBroker.Returns(metadataResponse) - - conf := NewConfig() - conf.Metadata.RefreshFrequency = 100 * time.Millisecond - client, err := NewClient([]string{seedBroker.Addr()}, conf) - if err != nil { - t.Fatal(err) - } - - // Wait for the background refresh to kick in - time.Sleep(110 * time.Millisecond) - - done := make(chan none) - go func() { - // Close the client - if err := client.Close(); err != nil { - t.Fatal(err) - } - close(done) - }() - - // Wait for the Close to kick in - time.Sleep(10 * time.Millisecond) - - // Then return some metadata to 
the still-running background thread - leader := NewMockBroker(t, 2) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError) - seedBroker.Returns(metadataResponse) - - <-done - - seedBroker.Close() - - // give the update time to happen so we get a panic if it's still running (which it shouldn't) - time.Sleep(10 * time.Millisecond) -} diff --git a/vendor/github.com/Shopify/sarama/client_tls_test.go b/vendor/github.com/Shopify/sarama/client_tls_test.go deleted file mode 100644 index eff84c77..00000000 --- a/vendor/github.com/Shopify/sarama/client_tls_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package sarama - -import ( - "math/big" - "net" - "testing" - "time" - - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" -) - -func TestTLS(t *testing.T) { - cakey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - - clientkey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - - hostkey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - - nvb := time.Now().Add(-1 * time.Hour) - nva := time.Now().Add(1 * time.Hour) - - caTemplate := &x509.Certificate{ - Subject: pkix.Name{CommonName: "ca"}, - Issuer: pkix.Name{CommonName: "ca"}, - SerialNumber: big.NewInt(0), - NotAfter: nva, - NotBefore: nvb, - IsCA: true, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign, - } - caDer, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &cakey.PublicKey, cakey) - if err != nil { - t.Fatal(err) - } - caFinalCert, err := x509.ParseCertificate(caDer) - if err != nil { - t.Fatal(err) - } - - hostDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ - Subject: pkix.Name{CommonName: "host"}, - Issuer: pkix.Name{CommonName: "ca"}, - IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)}, - SerialNumber: big.NewInt(0), - NotAfter: nva, - 
NotBefore: nvb, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - }, caFinalCert, &hostkey.PublicKey, cakey) - if err != nil { - t.Fatal(err) - } - - clientDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ - Subject: pkix.Name{CommonName: "client"}, - Issuer: pkix.Name{CommonName: "ca"}, - SerialNumber: big.NewInt(0), - NotAfter: nva, - NotBefore: nvb, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }, caFinalCert, &clientkey.PublicKey, cakey) - if err != nil { - t.Fatal(err) - } - - pool := x509.NewCertPool() - pool.AddCert(caFinalCert) - - systemCerts, err := x509.SystemCertPool() - if err != nil { - t.Fatal(err) - } - - // Keep server the same - it's the client that we're testing - serverTLSConfig := &tls.Config{ - Certificates: []tls.Certificate{tls.Certificate{ - Certificate: [][]byte{hostDer}, - PrivateKey: hostkey, - }}, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: pool, - } - - for _, tc := range []struct { - Succeed bool - Server, Client *tls.Config - }{ - { // Verify client fails if wrong CA cert pool is specified - Succeed: false, - Server: serverTLSConfig, - Client: &tls.Config{ - RootCAs: systemCerts, - Certificates: []tls.Certificate{tls.Certificate{ - Certificate: [][]byte{clientDer}, - PrivateKey: clientkey, - }}, - }, - }, - { // Verify client fails if wrong key is specified - Succeed: false, - Server: serverTLSConfig, - Client: &tls.Config{ - RootCAs: pool, - Certificates: []tls.Certificate{tls.Certificate{ - Certificate: [][]byte{clientDer}, - PrivateKey: hostkey, - }}, - }, - }, - { // Verify client fails if wrong cert is specified - Succeed: false, - Server: serverTLSConfig, - Client: &tls.Config{ - RootCAs: pool, - Certificates: []tls.Certificate{tls.Certificate{ - Certificate: [][]byte{hostDer}, - PrivateKey: clientkey, - }}, - }, - }, - { // Verify client fails if no CAs are specified - Succeed: false, - Server: serverTLSConfig, - Client: &tls.Config{ - Certificates: 
[]tls.Certificate{tls.Certificate{ - Certificate: [][]byte{clientDer}, - PrivateKey: clientkey, - }}, - }, - }, - { // Verify client fails if no keys are specified - Succeed: false, - Server: serverTLSConfig, - Client: &tls.Config{ - RootCAs: pool, - }, - }, - { // Finally, verify it all works happily with client and server cert in place - Succeed: true, - Server: serverTLSConfig, - Client: &tls.Config{ - RootCAs: pool, - Certificates: []tls.Certificate{tls.Certificate{ - Certificate: [][]byte{clientDer}, - PrivateKey: clientkey, - }}, - }, - }, - } { - doListenerTLSTest(t, tc.Succeed, tc.Server, tc.Client) - } -} - -func doListenerTLSTest(t *testing.T, expectSuccess bool, serverConfig, clientConfig *tls.Config) { - serverConfig.BuildNameToCertificate() - clientConfig.BuildNameToCertificate() - - seedListener, err := tls.Listen("tcp", "127.0.0.1:0", serverConfig) - if err != nil { - t.Fatal("cannot open listener", err) - } - - var childT *testing.T - if expectSuccess { - childT = t - } else { - childT = &testing.T{} // we want to swallow errors - } - - seedBroker := NewMockBrokerListener(childT, 1, seedListener) - defer seedBroker.Close() - - seedBroker.Returns(new(MetadataResponse)) - - config := NewConfig() - config.Net.TLS.Enable = true - config.Net.TLS.Config = clientConfig - - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err == nil { - safeClose(t, client) - } - - if expectSuccess { - if err != nil { - t.Fatal(err) - } - } else { - if err == nil { - t.Fatal("expected failure") - } - } -} diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go new file mode 100644 index 00000000..12cd7c3d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -0,0 +1,194 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4WriterPool = sync.Pool{ + New: func() interface{} { + 
return lz4.NewWriter(nil) + }, + } + + gzipWriterPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } + gzipWriterPoolForCompressionLevel1 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 1) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel2 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 2) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel3 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 3) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel4 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 4) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel5 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 5) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel6 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 6) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel7 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 7) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel8 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 8) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel9 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 9) + if err != nil { + panic(err) + } + return gz + }, + } +) + +func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + buf bytes.Buffer + writer *gzip.Writer + ) + + switch level { + case CompressionLevelDefault: + writer = 
gzipWriterPool.Get().(*gzip.Writer) + defer gzipWriterPool.Put(writer) + writer.Reset(&buf) + case 1: + writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel1.Put(writer) + writer.Reset(&buf) + case 2: + writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel2.Put(writer) + writer.Reset(&buf) + case 3: + writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel3.Put(writer) + writer.Reset(&buf) + case 4: + writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel4.Put(writer) + writer.Reset(&buf) + case 5: + writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel5.Put(writer) + writer.Reset(&buf) + case 6: + writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel6.Put(writer) + writer.Reset(&buf) + case 7: + writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel7.Put(writer) + writer.Reset(&buf) + case 8: + writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel8.Put(writer) + writer.Reset(&buf) + case 9: + writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel9.Put(writer) + writer.Reset(&buf) + default: + writer, err = gzip.NewWriterLevel(&buf, level) + if err != nil { + return nil, err + } + } + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionSnappy: + return snappy.Encode(data), nil + case CompressionLZ4: + writer := lz4WriterPool.Get().(*lz4.Writer) + defer lz4WriterPool.Put(writer) + + var buf bytes.Buffer + writer.Reset(&buf) + + if _, err := writer.Write(data); err != nil { + return 
nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionZSTD: + return zstdCompress(nil, data) + default: + return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index faf11e83..43e739ca 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -10,6 +10,7 @@ import ( "time" "github.com/rcrowley/go-metrics" + "golang.org/x/net/proxy" ) const defaultClientID = "sarama" @@ -20,6 +21,13 @@ var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) type Config struct { // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client. Admin struct { + Retry struct { + // The total number of times to retry sending (retriable) admin requests (default 5). + // Similar to the `retries` setting of the JVM AdminClientConfig. + Max int + // Backoff time between retries of a failed request (default 100ms) + Backoff time.Duration + } // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations, // including topics, brokers, configurations and ACLs (defaults to 3 seconds). Timeout time.Duration @@ -54,17 +62,43 @@ type Config struct { // Whether or not to use SASL authentication when connecting to the broker // (defaults to false). Enable bool + // SASLMechanism is the name of the enabled SASL mechanism. + // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN). + Mechanism SASLMechanism + // Version is the SASL Protocol Version to use + // Kafka > 1.x should use V1, except on Azure EventHub which use V0 + Version int16 // Whether or not to send the Kafka SASL handshake first if enabled // (defaults to true). You should only set this to false if you're using // a non-Kafka SASL proxy. 
Handshake bool - //username and password for SASL/PLAIN authentication - User string + // AuthIdentity is an (optional) authorization identity (authzid) to + // use for SASL/PLAIN authentication (if different from User) when + // an authenticated user is permitted to act as the presented + // alternative user. See RFC4616 for details. + AuthIdentity string + // User is the authentication identity (authcid) to present for + // SASL/PLAIN or SASL/SCRAM authentication + User string + // Password for SASL/PLAIN authentication Password string + // authz id used for SASL/SCRAM authentication + SCRAMAuthzID string + // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM + // client used to perform the SCRAM exchange with the server. + SCRAMClientGeneratorFunc func() SCRAMClient + // TokenProvider is a user-defined callback for generating + // access tokens for SASL/OAUTHBEARER auth. See the + // AccessTokenProvider interface docs for proper implementation + // guidelines. + TokenProvider AccessTokenProvider + + GSSAPI GSSAPIConfig } - // KeepAlive specifies the keep-alive period for an active network connection. - // If zero, keep-alives are disabled. (default is 0: disabled). + // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0). + // If zero or positive, keep-alives are enabled. + // If negative, keep-alives are disabled. KeepAlive time.Duration // LocalAddr is the local address to use when dialing an @@ -72,6 +106,14 @@ type Config struct { // network being dialed. // If nil, a local address is automatically chosen. LocalAddr net.Addr + + Proxy struct { + // Whether or not to use proxy when connecting to the broker + // (defaults to false). + Enable bool + // The proxy dialer to use enabled (defaults to nil). 
+ Dialer proxy.Dialer + } } // Metadata is the namespace for metadata management properties used by the @@ -84,6 +126,10 @@ type Config struct { // How long to wait for leader election to occur before retrying // (default 250ms). Similar to the JVM's `retry.backoff.ms`. Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration } // How frequently to refresh the cluster metadata in the background. // Defaults to 10 minutes. Set to 0 to disable. Similar to @@ -95,6 +141,13 @@ type Config struct { // and usually more convenient, but can take up a substantial amount of // memory if you have many topics and partitions. Defaults to true. Full bool + + // How long to wait for a successful metadata response. + // Disabled by default which means a metadata request against an unreachable + // cluster (all brokers are unreachable or unresponsive) can take up to + // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max` + // to fail. + Timeout time.Duration } // Producer is the namespace for configuration related to producing messages, @@ -124,6 +177,9 @@ type Config struct { // (defaults to hashing the message key). Similar to the `partitioner.class` // setting for the JVM producer. Partitioner PartitionerConstructor + // If enabled, the producer will ensure that exactly one copy of each message is + // written. + Idempotent bool // Return specifies what channels will be populated. If they are set to true, // you must read from the respective channels to prevent deadlock. If, @@ -168,7 +224,19 @@ type Config struct { // (default 100ms). Similar to the `retry.backoff.ms` setting of the // JVM producer. Backoff time.Duration + // Called to compute backoff time dynamically. 
Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration } + + // Interceptors to be called when the producer dispatcher reads the + // message for the first time. Interceptors allows to intercept and + // possible mutate the message before they are published to Kafka + // cluster. *ProducerMessage modified by the first interceptor's + // OnSend() is passed to the second interceptor OnSend(), and so on in + // the interceptor chain. + Interceptors []ProducerInterceptor } // Consumer is the namespace for configuration related to consuming messages, @@ -226,6 +294,10 @@ type Config struct { // How long to wait after a failing to read from a partition before // trying again (default 2s). Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries int) time.Duration } // Fetch is the namespace for controlling how many bytes are retrieved by any @@ -263,7 +335,7 @@ type Config struct { // than this, that partition will stop fetching more messages until it // can proceed again. // Note that, since the Messages channel is buffered, the actual grace time is - // (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms. + // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. // If a message is not written to the Messages channel between two ticks // of the expiryTicker then a timeout is detected. // Using a ticker instead of a timer to detect timeouts should typically @@ -289,9 +361,21 @@ type Config struct { // offsets. This currently requires the manual use of an OffsetManager // but will eventually be automated. Offsets struct { - // How frequently to commit updated offsets. Defaults to 1s. + // Deprecated: CommitInterval exists for historical compatibility + // and should not be used. 
Please use Consumer.Offsets.AutoCommit CommitInterval time.Duration + // AutoCommit specifies configuration for commit messages automatically. + AutoCommit struct { + // Whether or not to auto-commit updated offsets back to the broker. + // (default enabled). + Enable bool + + // How frequently to commit updated offsets. Ineffective unless + // auto-commit is enabled (default 1s) + Interval time.Duration + } + // The initial offset to use if no offset was previously committed. // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. Initial int64 @@ -310,12 +394,29 @@ type Config struct { Max int } } + + // IsolationLevel support 2 mode: + // - use `ReadUncommitted` (default) to consume and return all messages in message channel + // - use `ReadCommitted` to hide messages that are part of an aborted transaction + IsolationLevel IsolationLevel + + // Interceptors to be called just before the record is sent to the + // messages channel. Interceptors allows to intercept and possible + // mutate the message before they are returned to the client. + // *ConsumerMessage modified by the first interceptor's OnConsume() is + // passed to the second interceptor OnConsume(), and so on in the + // interceptor chain. + Interceptors []ConsumerInterceptor } // A user-provided string sent with every request to the brokers for logging, // debugging, and auditing purposes. Defaults to "sarama", but you should // probably set it to something specific to your application. ClientID string + // A rack identifier for this client. This can be any string value which + // indicates where this client is physically located. + // It corresponds with the broker config 'broker.rack' + RackID string // The number of events to buffer in internal and external channels. This // permits the producer and consumer to continue processing some messages // in the background while user code is working, greatly improving throughput. 
@@ -340,6 +441,8 @@ type Config struct { func NewConfig() *Config { c := &Config{} + c.Admin.Retry.Max = 5 + c.Admin.Retry.Backoff = 100 * time.Millisecond c.Admin.Timeout = 3 * time.Second c.Net.MaxOpenRequests = 5 @@ -347,6 +450,7 @@ func NewConfig() *Config { c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second c.Net.SASL.Handshake = true + c.Net.SASL.Version = SASLHandshakeV0 c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond @@ -368,7 +472,8 @@ func NewConfig() *Config { c.Consumer.MaxWaitTime = 250 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond c.Consumer.Return.Errors = false - c.Consumer.Offsets.CommitInterval = 1 * time.Second + c.Consumer.Offsets.AutoCommit.Enable = true + c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second c.Consumer.Offsets.Initial = OffsetNewest c.Consumer.Offsets.Retry.Max = 3 @@ -381,7 +486,7 @@ func NewConfig() *Config { c.ClientID = defaultClientID c.ChannelBufferSize = 256 - c.Version = MinVersion + c.Version = DefaultVersion c.MetricRegistry = metrics.NewRegistry() return c @@ -391,10 +496,10 @@ func NewConfig() *Config { // ConfigurationError if the specified values don't make sense. 
func (c *Config) Validate() error { // some configuration values should be warned on but not fail completely, do those first - if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") } - if c.Net.SASL.Enable == false { + if !c.Net.SASL.Enable { if c.Net.SASL.User != "" { Logger.Println("Net.SASL is disabled but a non-empty username was provided.") } @@ -449,12 +554,65 @@ func (c *Config) Validate() error { return ConfigurationError("Net.ReadTimeout must be > 0") case c.Net.WriteTimeout <= 0: return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") - case c.Net.SASL.Enable == true && c.Net.SASL.User == "": - return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") - case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": - return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + case c.Net.SASL.Enable: + if c.Net.SASL.Mechanism == "" { + c.Net.SASL.Mechanism = SASLTypePlaintext + } + + switch c.Net.SASL.Mechanism { + case SASLTypePlaintext: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + case SASLTypeOAuth: + if c.Net.SASL.TokenProvider == nil { + return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider") + } + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + if c.Net.SASL.SCRAMClientGeneratorFunc == nil { + return 
ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc") + } + case SASLTypeGSSAPI: + if c.Net.SASL.GSSAPI.ServiceName == "" { + return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") + } + + if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + if c.Net.SASL.GSSAPI.Password == "" { + return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") + } + } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + if c.Net.SASL.GSSAPI.KeyTabPath == "" { + return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + } + } else { + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { + return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") + } + if c.Net.SASL.GSSAPI.Username == "" { + return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used") + } + if c.Net.SASL.GSSAPI.Realm == "" { + return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used") + } + default: + msg := fmt.Sprintf("The SASL mechanism configuration is invalid. 
Possible values are `%s`, `%s`, `%s`, `%s` and `%s`", + SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI) + return ConfigurationError(msg) + } } // validate the Admin values @@ -511,6 +669,25 @@ func (c *Config) Validate() error { } } + if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) { + return ConfigurationError("zstd compression requires Version >= V2_1_0_0") + } + + if c.Producer.Idempotent { + if !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0") + } + if c.Producer.Retry.Max == 0 { + return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1") + } + if c.Producer.RequiredAcks != WaitForAll { + return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll") + } + if c.Net.MaxOpenRequests > 1 { + return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1") + } + } + // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: @@ -525,12 +702,24 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.MaxProcessingTime must be > 0") case c.Consumer.Retry.Backoff < 0: return ConfigurationError("Consumer.Retry.Backoff must be >= 0") - case c.Consumer.Offsets.CommitInterval <= 0: - return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") + case c.Consumer.Offsets.AutoCommit.Interval <= 0: + return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0") case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") case c.Consumer.Offsets.Retry.Max < 0: return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") + case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted: + return ConfigurationError("Consumer.IsolationLevel must 
be ReadUncommitted or ReadCommitted") + } + + if c.Consumer.Offsets.CommitInterval != 0 { + Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") + } + + // validate IsolationLevel + if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") } // validate the Consumer Group values @@ -561,3 +750,16 @@ func (c *Config) Validate() error { return nil } + +func (c *Config) getDialer() proxy.Dialer { + if c.Net.Proxy.Enable { + Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + return c.Net.Proxy.Dialer + } else { + return &net.Dialer{ + Timeout: c.Net.DialTimeout, + KeepAlive: c.Net.KeepAlive, + LocalAddr: c.Net.LocalAddr, + } + } +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go index 848cc9c9..bef1053a 100644 --- a/vendor/github.com/Shopify/sarama/config_resource_type.go +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -1,15 +1,18 @@ package sarama +// ConfigResourceType is a type for resources that have configs. 
type ConfigResourceType int8 -// Taken from : -// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes +// Taken from: +// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55 const ( + // UnknownResource constant type UnknownResource ConfigResourceType = 0 - AnyResource ConfigResourceType = 1 - TopicResource ConfigResourceType = 2 - GroupResource ConfigResourceType = 3 - ClusterResource ConfigResourceType = 4 - BrokerResource ConfigResourceType = 5 + // TopicResource constant type + TopicResource ConfigResourceType = 2 + // BrokerResource constant type + BrokerResource ConfigResourceType = 4 + // BrokerLoggerResource constant type + BrokerLoggerResource ConfigResourceType = 8 ) diff --git a/vendor/github.com/Shopify/sarama/config_test.go b/vendor/github.com/Shopify/sarama/config_test.go deleted file mode 100644 index d0e0af72..00000000 --- a/vendor/github.com/Shopify/sarama/config_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package sarama - -import ( - "os" - "testing" - - "github.com/rcrowley/go-metrics" -) - -func TestDefaultConfigValidates(t *testing.T) { - config := NewConfig() - if err := config.Validate(); err != nil { - t.Error(err) - } - if config.MetricRegistry == nil { - t.Error("Expected non nil metrics.MetricRegistry, got nil") - } -} - -func TestInvalidClientIDConfigValidates(t *testing.T) { - config := NewConfig() - config.ClientID = "foo:bar" - if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" { - t.Error("Expected invalid ClientID, got ", err) - } -} - -func TestEmptyClientIDConfigValidates(t *testing.T) { - config := NewConfig() - config.ClientID = "" - if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" { - t.Error("Expected invalid ClientID, got ", err) - } -} - -func 
TestNetConfigValidates(t *testing.T) { - tests := []struct { - name string - cfg func(*Config) // resorting to using a function as a param because of internal composite structs - err string - }{ - { - "OpenRequests", - func(cfg *Config) { - cfg.Net.MaxOpenRequests = 0 - }, - "Net.MaxOpenRequests must be > 0"}, - {"DialTimeout", - func(cfg *Config) { - cfg.Net.DialTimeout = 0 - }, - "Net.DialTimeout must be > 0"}, - {"ReadTimeout", - func(cfg *Config) { - cfg.Net.ReadTimeout = 0 - }, - "Net.ReadTimeout must be > 0"}, - {"WriteTimeout", - func(cfg *Config) { - cfg.Net.WriteTimeout = 0 - }, - "Net.WriteTimeout must be > 0"}, - {"KeepAlive", - func(cfg *Config) { - cfg.Net.KeepAlive = -1 - }, - "Net.KeepAlive must be >= 0"}, - {"SASL.User", - func(cfg *Config) { - cfg.Net.SASL.Enable = true - cfg.Net.SASL.User = "" - }, - "Net.SASL.User must not be empty when SASL is enabled"}, - {"SASL.Password", - func(cfg *Config) { - cfg.Net.SASL.Enable = true - cfg.Net.SASL.User = "user" - cfg.Net.SASL.Password = "" - }, - "Net.SASL.Password must not be empty when SASL is enabled"}, - } - - for i, test := range tests { - c := NewConfig() - test.cfg(c) - if err := c.Validate(); string(err.(ConfigurationError)) != test.err { - t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) - } - } -} - -func TestMetadataConfigValidates(t *testing.T) { - tests := []struct { - name string - cfg func(*Config) // resorting to using a function as a param because of internal composite structs - err string - }{ - { - "Retry.Max", - func(cfg *Config) { - cfg.Metadata.Retry.Max = -1 - }, - "Metadata.Retry.Max must be >= 0"}, - {"Retry.Backoff", - func(cfg *Config) { - cfg.Metadata.Retry.Backoff = -1 - }, - "Metadata.Retry.Backoff must be >= 0"}, - {"RefreshFrequency", - func(cfg *Config) { - cfg.Metadata.RefreshFrequency = -1 - }, - "Metadata.RefreshFrequency must be >= 0"}, - } - - for i, test := range tests { - c := NewConfig() - test.cfg(c) - if err := c.Validate(); 
string(err.(ConfigurationError)) != test.err { - t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) - } - } -} - -func TestAdminConfigValidates(t *testing.T) { - tests := []struct { - name string - cfg func(*Config) // resorting to using a function as a param because of internal composite structs - err string - }{ - {"Timeout", - func(cfg *Config) { - cfg.Admin.Timeout = 0 - }, - "Admin.Timeout must be > 0"}, - } - - for i, test := range tests { - c := NewConfig() - test.cfg(c) - if err := c.Validate(); string(err.(ConfigurationError)) != test.err { - t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) - } - } -} - -func TestProducerConfigValidates(t *testing.T) { - tests := []struct { - name string - cfg func(*Config) // resorting to using a function as a param because of internal composite structs - err string - }{ - { - "MaxMessageBytes", - func(cfg *Config) { - cfg.Producer.MaxMessageBytes = 0 - }, - "Producer.MaxMessageBytes must be > 0"}, - {"RequiredAcks", - func(cfg *Config) { - cfg.Producer.RequiredAcks = -2 - }, - "Producer.RequiredAcks must be >= -1"}, - {"Timeout", - func(cfg *Config) { - cfg.Producer.Timeout = 0 - }, - "Producer.Timeout must be > 0"}, - {"Partitioner", - func(cfg *Config) { - cfg.Producer.Partitioner = nil - }, - "Producer.Partitioner must not be nil"}, - {"Flush.Bytes", - func(cfg *Config) { - cfg.Producer.Flush.Bytes = -1 - }, - "Producer.Flush.Bytes must be >= 0"}, - {"Flush.Messages", - func(cfg *Config) { - cfg.Producer.Flush.Messages = -1 - }, - "Producer.Flush.Messages must be >= 0"}, - {"Flush.Frequency", - func(cfg *Config) { - cfg.Producer.Flush.Frequency = -1 - }, - "Producer.Flush.Frequency must be >= 0"}, - {"Flush.MaxMessages", - func(cfg *Config) { - cfg.Producer.Flush.MaxMessages = -1 - }, - "Producer.Flush.MaxMessages must be >= 0"}, - {"Flush.MaxMessages with Producer.Flush.Messages", - func(cfg *Config) { - cfg.Producer.Flush.MaxMessages = 1 - cfg.Producer.Flush.Messages = 2 
- }, - "Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set"}, - {"Flush.Retry.Max", - func(cfg *Config) { - cfg.Producer.Retry.Max = -1 - }, - "Producer.Retry.Max must be >= 0"}, - {"Flush.Retry.Backoff", - func(cfg *Config) { - cfg.Producer.Retry.Backoff = -1 - }, - "Producer.Retry.Backoff must be >= 0"}, - } - - for i, test := range tests { - c := NewConfig() - test.cfg(c) - if err := c.Validate(); string(err.(ConfigurationError)) != test.err { - t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err) - } - } -} - -func TestLZ4ConfigValidation(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionLZ4 - if err := config.Validate(); string(err.(ConfigurationError)) != "lz4 compression requires Version >= V0_10_0_0" { - t.Error("Expected invalid lz4/kafka version error, got ", err) - } - config.Version = V0_10_0_0 - if err := config.Validate(); err != nil { - t.Error("Expected lz4 to work, got ", err) - } -} - -// This example shows how to integrate with an existing registry as well as publishing metrics -// on the standard output -func ExampleConfig_metrics() { - // Our application registry - appMetricRegistry := metrics.NewRegistry() - appGauge := metrics.GetOrRegisterGauge("m1", appMetricRegistry) - appGauge.Update(1) - - config := NewConfig() - // Use a prefix registry instead of the default local one - config.MetricRegistry = metrics.NewPrefixedChildRegistry(appMetricRegistry, "sarama.") - - // Simulate a metric created by sarama without starting a broker - saramaGauge := metrics.GetOrRegisterGauge("m2", config.MetricRegistry) - saramaGauge.Update(2) - - metrics.WriteOnce(appMetricRegistry, os.Stdout) - // Output: - // gauge m1 - // value: 1 - // gauge sarama.m2 - // value: 2 -} diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index 33d9d143..9bd8d182 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ 
b/vendor/github.com/Shopify/sarama/consumer.go @@ -3,20 +3,24 @@ package sarama import ( "errors" "fmt" + "math" "sync" "sync/atomic" "time" + + "github.com/rcrowley/go-metrics" ) // ConsumerMessage encapsulates a Kafka message returned by the consumer. type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 + Headers []*RecordHeader // only set if kafka is version 0.11+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp - Headers []*RecordHeader // only set if kafka is version 0.11+ + + Key, Value []byte + Topic string + Partition int32 + Offset int64 } // ConsumerError is what is provided to the user when an error occurs. @@ -31,6 +35,10 @@ func (ce ConsumerError) Error() string { return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) } +func (ce ConsumerError) Unwrap() error { + return ce.Err +} + // ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. // It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors // when stopping. @@ -43,13 +51,7 @@ func (ce ConsumerErrors) Error() string { // Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() // on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of // scope. -// -// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. -// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library -// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the -// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. 
type Consumer interface { - // Topics returns the set of available topics as retrieved from the cluster // metadata. This method is the same as Client.Topics(), and is provided for // convenience. @@ -75,13 +77,11 @@ type Consumer interface { } type consumer struct { - client Client - conf *Config - ownClient bool - - lock sync.Mutex + conf *Config children map[string]map[int32]*partitionConsumer brokerConsumers map[*Broker]*brokerConsumer + client Client + lock sync.Mutex } // NewConsumer creates a new consumer using the given broker addresses and configuration. @@ -90,18 +90,19 @@ func NewConsumer(addrs []string, config *Config) (Consumer, error) { if err != nil { return nil, err } - - c, err := NewConsumerFromClient(client) - if err != nil { - return nil, err - } - c.(*consumer).ownClient = true - return c, nil + return newConsumer(client) } // NewConsumerFromClient creates a new consumer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. func NewConsumerFromClient(client Client) (Consumer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newConsumer(cli) +} + +func newConsumer(client Client) (Consumer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient @@ -118,10 +119,7 @@ func NewConsumerFromClient(client Client) (Consumer, error) { } func (c *consumer) Close() error { - if c.ownClient { - return c.client.Close() - } - return nil + return c.client.Close() } func (c *consumer) Topics() ([]string, error) { @@ -261,12 +259,11 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. // // To terminate such a for/range loop while the loop is executing, call AsyncClose. 
This will kick off the process of -// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process // AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call // Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will // also drain the Messages channel, harvest all errors & return them once cleanup has completed. type PartitionConsumer interface { - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call @@ -298,22 +295,24 @@ type PartitionConsumer interface { type partitionConsumer struct { highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG - consumer *consumer - conf *Config - topic string - partition int32 + consumer *consumer + conf *Config broker *brokerConsumer messages chan *ConsumerMessage errors chan *ConsumerError feeder chan *FetchResponse + preferredReadReplica int32 + trigger, dying chan none - responseResult error closeOnce sync.Once - - fetchSize int32 - offset int64 + topic string + partition int32 + responseResult error + fetchSize int32 + offset int64 + retries int32 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing @@ -332,12 +331,20 @@ func (child *partitionConsumer) sendError(err error) { } } +func (child *partitionConsumer) computeBackoff() time.Duration { + if child.conf.Consumer.Retry.BackoffFunc != nil { + retries := atomic.AddInt32(&child.retries, 1) + 
return child.conf.Consumer.Retry.BackoffFunc(int(retries)) + } + return child.conf.Consumer.Retry.Backoff +} + func (child *partitionConsumer) dispatcher() { for range child.trigger { select { case <-child.dying: close(child.trigger) - case <-time.After(child.conf.Consumer.Retry.Backoff): + case <-time.After(child.computeBackoff()): if child.broker != nil { child.consumer.unrefBrokerConsumer(child.broker) child.broker = nil @@ -358,18 +365,29 @@ func (child *partitionConsumer) dispatcher() { close(child.feeder) } +func (child *partitionConsumer) preferredBroker() (*Broker, error) { + if child.preferredReadReplica >= 0 { + broker, err := child.consumer.client.Broker(child.preferredReadReplica) + if err == nil { + return broker, nil + } + } + + // if prefered replica cannot be found fallback to leader + return child.consumer.client.Leader(child.topic, child.partition) +} + func (child *partitionConsumer) dispatch() error { if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { return err } - var leader *Broker - var err error - if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + broker, err := child.preferredBroker() + if err != nil { return err } - child.broker = child.consumer.refBrokerConsumer(leader) + child.broker = child.consumer.refBrokerConsumer(broker) child.broker.input <- child @@ -421,19 +439,13 @@ func (child *partitionConsumer) AsyncClose() { func (child *partitionConsumer) Close() error { child.AsyncClose() - go withRecover(func() { - for range child.messages { - // drain - } - }) - - var errors ConsumerErrors + var consumerErrors ConsumerErrors for err := range child.errors { - errors = append(errors, err) + consumerErrors = append(consumerErrors, err) } - if len(errors) > 0 { - return errors + if len(consumerErrors) > 0 { + return consumerErrors } return nil } @@ -451,17 +463,32 @@ feederLoop: for response := range child.feeder { msgs, child.responseResult = child.parseResponse(response) + 
if child.responseResult == nil { + atomic.StoreInt32(&child.retries, 0) + } + for i, msg := range msgs { + for _, interceptor := range child.conf.Consumer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } messageSelect: select { + case <-child.dying: + child.broker.acks.Done() + continue feederLoop case child.messages <- msg: firstAttempt = true case <-expiryTicker.C: if !firstAttempt { child.responseResult = errTimedOut child.broker.acks.Done() + remainingLoop: for _, msg = range msgs[i:] { - child.messages <- msg + select { + case child.messages <- msg: + case <-child.dying: + break remainingLoop + } } child.broker.input <- child continue feederLoop @@ -487,9 +514,13 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe for _, msgBlock := range msgSet.Messages { for _, msg := range msgBlock.Messages() { offset := msg.Offset + timestamp := msg.Msg.Timestamp if msg.Msg.Version >= 1 { baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset offset += baseOffset + if msg.Msg.LogAppendTime { + timestamp = msgBlock.Msg.Timestamp + } } if offset < child.offset { continue @@ -500,43 +531,65 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe Key: msg.Msg.Key, Value: msg.Msg.Value, Offset: offset, - Timestamp: msg.Msg.Timestamp, + Timestamp: timestamp, BlockTimestamp: msgBlock.Msg.Timestamp, }) child.offset = offset + 1 } } if len(messages) == 0 { - return nil, ErrIncompleteResponse + child.offset++ } return messages, nil } func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { - var messages []*ConsumerMessage + messages := make([]*ConsumerMessage, 0, len(batch.Records)) + for _, rec := range batch.Records { offset := batch.FirstOffset + rec.OffsetDelta if offset < child.offset { continue } + timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta) + if batch.LogAppendTime { + timestamp = batch.MaxTimestamp + } messages = 
append(messages, &ConsumerMessage{ Topic: child.topic, Partition: child.partition, Key: rec.Key, Value: rec.Value, Offset: offset, - Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), + Timestamp: timestamp, Headers: rec.Headers, }) child.offset = offset + 1 } if len(messages) == 0 { - child.offset += 1 + child.offset++ } return messages, nil } func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + var ( + metricRegistry = child.conf.MetricRegistry + consumerBatchSizeMetric metrics.Histogram + ) + + if metricRegistry != nil { + consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry) + } + + // If request was throttled and empty we log and return without error + if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 { + Logger.Printf( + "consumer/broker/%d FetchResponse throttled %v\n", + child.broker.broker.ID(), response.ThrottleTime) + return nil, nil + } + block := response.GetBlock(child.topic, child.partition) if block == nil { return nil, ErrIncompleteResponse @@ -550,6 +603,11 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu if err != nil { return nil, err } + + consumerBatchSizeMetric.Update(int64(nRecs)) + + child.preferredReadReplica = block.PreferredReadReplica + if nRecs == 0 { partialTrailingMessage, err := block.isPartial() if err != nil { @@ -564,6 +622,10 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.offset++ // skip this one so we can keep processing future messages } else { child.fetchSize *= 2 + // check int32 overflow + if child.fetchSize < 0 { + child.fetchSize = math.MaxInt32 + } if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { child.fetchSize = child.conf.Consumer.Fetch.Max } @@ -577,7 +639,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = 
child.conf.Consumer.Fetch.Default atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - messages := []*ConsumerMessage{} + // abortedProducerIDs contains producerID which message should be ignored as uncommitted + // - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset) + // - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over + abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions)) + abortedTransactions := block.getAbortedTransactions() + + var messages []*ConsumerMessage for _, records := range block.RecordsSet { switch records.recordsType { case legacyRecords: @@ -588,14 +656,56 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu messages = append(messages, messageSetMessages...) case defaultRecords: + // Consume remaining abortedTransaction up to last offset of current batch + for _, txn := range abortedTransactions { + if txn.FirstOffset > records.RecordBatch.LastOffset() { + break + } + abortedProducerIDs[txn.ProducerID] = struct{}{} + // Pop abortedTransactions so that we never add it again + abortedTransactions = abortedTransactions[1:] + } + recordBatchMessages, err := child.parseRecords(records.RecordBatch) if err != nil { return nil, err } - if control, err := records.isControl(); err != nil || control { + + // Parse and commit offset but do not expose messages that are: + // - control records + // - part of an aborted transaction when set to `ReadCommitted` + + // control record + isControl, err := records.isControl() + if err != nil { + // I don't know why there is this continue in case of error to begin with + // Safe bet is to ignore control messages if ReadUncommitted + // and block on them in case of error and ReadCommitted + if child.conf.Consumer.IsolationLevel == ReadCommitted { + return 
nil, err + } + continue + } + if isControl { + controlRecord, err := records.getControlRecord() + if err != nil { + return nil, err + } + + if controlRecord.Type == ControlRecordAbort { + delete(abortedProducerIDs, records.RecordBatch.ProducerID) + } continue } + // filter aborted transactions + if child.conf.Consumer.IsolationLevel == ReadCommitted { + _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID] + if records.RecordBatch.IsTransactional && isAborted { + continue + } + } + messages = append(messages, recordBatchMessages...) default: return nil, fmt.Errorf("unknown records type: %v", records.recordsType) @@ -605,15 +715,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return messages, nil } -// brokerConsumer - type brokerConsumer struct { consumer *consumer broker *Broker input chan *partitionConsumer newSubscriptions chan []*partitionConsumer - wait chan none subscriptions map[*partitionConsumer]none + wait chan none acks sync.WaitGroup refs int } @@ -635,14 +743,14 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { return bc } +// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer +// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks +// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give +// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, +// so the main goroutine can block waiting for work if it has none. func (bc *brokerConsumer) subscriptionManager() { var buffer []*partitionConsumer - // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches it up. 
The main worker goroutine picks - // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give - // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, - // so the main goroutine can block waiting for work if it has none. for { if len(buffer) > 0 { select { @@ -675,10 +783,10 @@ done: close(bc.newSubscriptions) } +//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available func (bc *brokerConsumer) subscriptionConsumer() { <-bc.wait // wait for our first piece of work - // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) @@ -719,20 +827,30 @@ func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsu close(child.trigger) delete(bc.subscriptions, child) default: - break + // no-op } } } +//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed func (bc *brokerConsumer) handleResponses() { - // handles the response codes left for us by our subscriptions, and abandons ones that have been closed for child := range bc.subscriptions { result := child.responseResult child.responseResult = nil + if result == nil { + if child.preferredReadReplica >= 0 && bc.broker.ID() != child.preferredReadReplica { + // not an error but needs redispatching to consume from prefered replica + child.trigger <- none{} + delete(bc.subscriptions, child) + } + continue + } + + // Discard any replica preference. 
+ child.preferredReadReplica = -1 + switch result { - case nil: - break case errTimedOut: Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", bc.broker.ID(), child.topic, child.partition) @@ -787,6 +905,9 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } + if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { + request.Version = 1 + } if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } @@ -796,7 +917,22 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { } if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 4 - request.Isolation = ReadUncommitted // We don't support yet transactions. + request.Isolation = bc.consumer.conf.Consumer.IsolationLevel + } + if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 7 + // We do not currently implement KIP-227 FetchSessions. Setting the id to 0 + // and the epoch to -1 tells the broker not to generate as session ID we're going + // to just ignore anyway. + request.SessionID = 0 + request.SessionEpoch = -1 + } + if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) { + request.Version = 10 + } + if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) { + request.Version = 11 + request.RackID = bc.consumer.conf.RackID } for child := range bc.subscriptions { diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go index 33a23147..fcc5792e 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -33,11 +33,14 @@ type ConsumerGroup interface { // to allow the user to perform any final tasks before a rebalance. // 6. Finally, marked offsets are committed one last time before claims are released. 
// - // Please note, that once a relance is triggered, sessions must be completed within + // Please note, that once a rebalance is triggered, sessions must be completed within // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset // commit failures. + // This method should be called inside an infinite loop, when a + // server-side rebalance happens, the consumer session will need to be + // recreated to get the new claims. Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error // Errors returns a read channel of errors that occurred during the consumer life-cycle. @@ -52,8 +55,7 @@ type ConsumerGroup interface { } type consumerGroup struct { - client Client - ownClient bool + client Client config *Config consumer Consumer @@ -64,6 +66,8 @@ type consumerGroup struct { lock sync.Mutex closed chan none closeOnce sync.Once + + userData []byte } // NewConsumerGroup creates a new consumer group the given broker addresses and configuration. @@ -73,20 +77,24 @@ func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerG return nil, err } - c, err := NewConsumerGroupFromClient(groupID, client) + c, err := newConsumerGroup(groupID, client) if err != nil { _ = client.Close() - return nil, err } - - c.(*consumerGroup).ownClient = true - return c, nil + return c, err } -// NewConsumerFromClient creates a new consumer group using the given client. It is still +// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. // PLEASE NOTE: consumer groups can only re-use but not share clients. 
func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newConsumerGroup(groupID, cli) +} + +func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { config := client.Config() if !config.Version.IsAtLeast(V0_10_2_0) { return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") @@ -115,9 +123,6 @@ func (c *consumerGroup) Close() (err error) { c.closeOnce.Do(func() { close(c.closed) - c.lock.Lock() - defer c.lock.Unlock() - // leave group if e := c.leave(); e != nil { err = e @@ -131,10 +136,8 @@ func (c *consumerGroup) Close() (err error) { err = e } - if c.ownClient { - if e := c.client.Close(); e != nil { - err = e - } + if e := c.client.Close(); e != nil { + err = e } }) return @@ -162,20 +165,19 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return err } - // Get coordinator - coordinator, err := c.client.Coordinator(c.groupID) - if err != nil { - return err - } - // Init session - sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) + sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) if err == ErrClosedClient { return ErrClosedConsumerGroup } else if err != nil { return err } + // loop check topic partition numbers changed + // will trigger rebalance when any topic partitions number had changed + // avoid Consume function called again that will generate more than loopCheckPartitionNumbers coroutine + go c.loopCheckPartitionNumbers(topics, sess) + // Wait for session exit signal <-sess.ctx.Done() @@ -183,7 +185,33 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return sess.release(true) } -func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler 
ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { +func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { + select { + case <-c.closed: + return nil, ErrClosedConsumerGroup + case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + } + + if refreshCoordinator { + err := c.client.RefreshCoordinator(c.groupID) + if err != nil { + return c.retryNewSession(ctx, topics, handler, retries, true) + } + } + + return c.newSession(ctx, topics, handler, retries-1) +} + +func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + if retries <= 0 { + return nil, err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + } + // Join consumer group join, err := c.joinGroupRequest(coordinator, topics) if err != nil { @@ -195,19 +223,19 @@ func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, top c.memberID = join.MemberId case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately c.memberID = "" - return c.newSession(ctx, coordinator, topics, handler, retries) - case ErrRebalanceInProgress: // retry after backoff + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh if retries <= 0 { return nil, join.Err } - select { - case <-c.closed: - return nil, ErrClosedConsumerGroup - case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, join.Err } - return c.newSession(ctx, coordinator, topics, handler, retries-1) + return c.retryNewSession(ctx, topics, handler, 
retries, false) default: return nil, join.Err } @@ -227,47 +255,48 @@ func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, top } // Sync consumer group - sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) + groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) if err != nil { _ = coordinator.Close() return nil, err } - switch sync.Err { + switch groupRequest.Err { case ErrNoError: case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately c.memberID = "" - return c.newSession(ctx, coordinator, topics, handler, retries) - case ErrRebalanceInProgress: // retry after backoff + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh if retries <= 0 { - return nil, sync.Err + return nil, groupRequest.Err } - select { - case <-c.closed: - return nil, ErrClosedConsumerGroup - case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, groupRequest.Err } - return c.newSession(ctx, coordinator, topics, handler, retries-1) + return c.retryNewSession(ctx, topics, handler, retries, false) default: - return nil, sync.Err + return nil, groupRequest.Err } // Retrieve and sort claims var claims map[string][]int32 - if len(sync.MemberAssignment) > 0 { - members, err := sync.GetMemberAssignment() + if len(groupRequest.MemberAssignment) > 0 { + members, err := groupRequest.GetMemberAssignment() if err != nil { return nil, err } claims = members.Topics + c.userData = members.UserData for _, partitions := range claims { sort.Sort(int32Slice(partitions)) } } - return newConsumerGroupSession(c, ctx, claims, join.MemberId, join.GenerationId, handler) + return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) } func (c 
*consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { @@ -282,9 +311,14 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) ( req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) } + // use static user-data if configured, otherwise use consumer-group userdata from the last sync + userData := c.config.Consumer.Group.Member.UserData + if len(userData) == 0 { + userData = c.userData + } meta := &ConsumerGroupMemberMetadata{ Topics: topics, - UserData: c.config.Consumer.Group.Member.UserData, + UserData: userData, } strategy := c.config.Consumer.Group.Rebalance.Strategy if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { @@ -300,13 +334,17 @@ func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrate MemberId: c.memberID, GenerationId: generationID, } + strategy := c.config.Consumer.Group.Rebalance.Strategy for memberID, topics := range plan { - err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{ - Topics: topics, - }) + assignment := &ConsumerGroupMemberAssignment{Topics: topics} + userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) if err != nil { return nil, err } + assignment.UserData = userDataBytes + if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { + return nil, err + } } return coordinator.SyncGroup(req) } @@ -341,8 +379,10 @@ func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) return strategy.Plan(members, topics) } -// Leaves the cluster, called by Close, protected by lock. +// Leaves the cluster, called by Close. 
func (c *consumerGroup) leave() error { + c.lock.Lock() + defer c.lock.Unlock() if c.memberID == "" { return nil } @@ -374,12 +414,6 @@ func (c *consumerGroup) leave() error { } func (c *consumerGroup) handleError(err error, topic string, partition int32) { - select { - case <-c.closed: - return - default: - } - if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { err = &ConsumerError{ Topic: topic, @@ -388,16 +422,69 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { } } - if c.config.Consumer.Return.Errors { + if !c.config.Consumer.Return.Errors { + Logger.Println(err) + return + } + + select { + case <-c.closed: + //consumer is closed + return + default: + } + + select { + case c.errors <- err: + default: + // no error listener + } +} + +func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { + pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer session.cancel() + defer pause.Stop() + var oldTopicToPartitionNum map[string]int + var err error + if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil { + return + } + for { + if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil { + return + } else { + for topic, num := range oldTopicToPartitionNum { + if newTopicToPartitionNum[topic] != num { + return // trigger the end of the session on exit + } + } + } select { - case c.errors <- err: - default: + case <-pause.C: + case <-session.ctx.Done(): + Logger.Printf("loop check partition number coroutine will exit, topics %s", topics) + // if session closed by other, should be exited + return + case <-c.closed: + return } - } else { - Logger.Println(err) } } +func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) { + topicToPartitionNum := make(map[string]int, len(topics)) + for _, topic := range topics { + if partitionNum, err := c.client.Partitions(topic); err != nil { + 
Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err) + return nil, err + } else { + topicToPartitionNum[topic] = len(partitionNum) + } + } + return topicToPartitionNum, nil +} + // -------------------------------------------------------------------- // ConsumerGroupSession represents a consumer group member session. @@ -426,6 +513,11 @@ type ConsumerGroupSession interface { // message twice, and your processing should ideally be idempotent. MarkOffset(topic string, partition int32, offset int64, metadata string) + // Commit the offset to the backend + // + // Note: calling Commit performs a blocking synchronous operation. + Commit() + // ResetOffset resets to the provided offset, alongside a metadata string that // represents the state of the partition consumer at that point in time. Reset // acts as a counterpart to MarkOffset, the difference being that it allows to @@ -456,7 +548,7 @@ type consumerGroupSession struct { hbDying, hbDead chan none } -func newConsumerGroupSession(parent *consumerGroup, ctx context.Context, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { +func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { // init offset manager offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client) if err != nil { @@ -537,6 +629,10 @@ func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset } } +func (s *consumerGroupSession) Commit() { + s.offsets.Commit() +} + func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { if pom := s.offsets.findPOM(topic, partition); pom != nil { pom.ResetOffset(offset, metadata) @@ -595,7 +691,7 @@ func (s *consumerGroupSession) consume(topic string, partition int32) { 
s.parent.handleError(err, topic, partition) } - // ensure consumer is clased & drained + // ensure consumer is closed & drained claim.AsyncClose() for _, err := range claim.waitClosed() { s.parent.handleError(err, topic, partition) @@ -613,7 +709,7 @@ func (s *consumerGroupSession) release(withCleanup bool) (err error) { s.releaseOnce.Do(func() { if withCleanup { if e := s.handler.Cleanup(s); e != nil { - s.parent.handleError(err, "", -1) + s.parent.handleError(e, "", -1) err = e } } @@ -657,6 +753,12 @@ func (s *consumerGroupSession) heartbeatLoop() { resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) if err != nil { _ = coordinator.Close() + + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + retries-- continue } @@ -667,7 +769,7 @@ func (s *consumerGroupSession) heartbeatLoop() { case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: return default: - s.parent.handleError(err, "", -1) + s.parent.handleError(resp.Err, "", -1) return } @@ -691,7 +793,7 @@ type ConsumerGroupHandler interface { // Setup is run at the beginning of a new session, before ConsumeClaim. Setup(ConsumerGroupSession) error - // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exites + // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited // but before the offsets are committed for the very last time. 
Cleanup(ConsumerGroupSession) error diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go index 9d92d350..2d02cc38 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -1,5 +1,6 @@ package sarama +//ConsumerGroupMemberMetadata holds the metadata for consumer group type ConsumerGroupMemberMetadata struct { Version int16 Topics []string @@ -36,6 +37,7 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { return nil } +//ConsumerGroupMemberAssignment holds the member assignment for a consume group type ConsumerGroupMemberAssignment struct { Version int16 Topics map[string][]int32 diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members_test.go b/vendor/github.com/Shopify/sarama/consumer_group_members_test.go deleted file mode 100644 index d65e8adc..00000000 --- a/vendor/github.com/Shopify/sarama/consumer_group_members_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package sarama - -import ( - "bytes" - "reflect" - "testing" -) - -var ( - groupMemberMetadata = []byte{ - 0, 1, // Version - 0, 0, 0, 2, // Topic array length - 0, 3, 'o', 'n', 'e', // Topic one - 0, 3, 't', 'w', 'o', // Topic two - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata - } - groupMemberAssignment = []byte{ - 0, 1, // Version - 0, 0, 0, 1, // Topic array length - 0, 3, 'o', 'n', 'e', // Topic one - 0, 0, 0, 3, // Topic one, partition array length - 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4 - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata - } -) - -func TestConsumerGroupMemberMetadata(t *testing.T) { - meta := &ConsumerGroupMemberMetadata{ - Version: 1, - Topics: []string{"one", "two"}, - UserData: []byte{0x01, 0x02, 0x03}, - } - - buf, err := encode(meta, nil) - if err != nil { - t.Error("Failed to encode data", err) - } else if !bytes.Equal(groupMemberMetadata, buf) { - t.Errorf("Encoded data does not match 
expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf) - } - - meta2 := new(ConsumerGroupMemberMetadata) - err = decode(buf, meta2) - if err != nil { - t.Error("Failed to decode data", err) - } else if !reflect.DeepEqual(meta, meta2) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2) - } -} - -func TestConsumerGroupMemberAssignment(t *testing.T) { - amt := &ConsumerGroupMemberAssignment{ - Version: 1, - Topics: map[string][]int32{ - "one": {0, 2, 4}, - }, - UserData: []byte{0x01, 0x02, 0x03}, - } - - buf, err := encode(amt, nil) - if err != nil { - t.Error("Failed to encode data", err) - } else if !bytes.Equal(groupMemberAssignment, buf) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf) - } - - amt2 := new(ConsumerGroupMemberAssignment) - err = decode(buf, amt2) - if err != nil { - t.Error("Failed to decode data", err) - } else if !reflect.DeepEqual(amt, amt2) { - t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2) - } -} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_test.go b/vendor/github.com/Shopify/sarama/consumer_group_test.go deleted file mode 100644 index 8bf44e66..00000000 --- a/vendor/github.com/Shopify/sarama/consumer_group_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package sarama - -import ( - "context" - "fmt" -) - -type exampleConsumerGroupHandler struct{} - -func (exampleConsumerGroupHandler) Setup(_ ConsumerGroupSession) error { return nil } -func (exampleConsumerGroupHandler) Cleanup(_ ConsumerGroupSession) error { return nil } -func (h exampleConsumerGroupHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error { - for msg := range claim.Messages() { - fmt.Printf("Message topic:%q partition:%d offset:%d\n", msg.Topic, msg.Partition, msg.Offset) - sess.MarkMessage(msg, "") - } - return nil -} - -func ExampleConsumerGroup() { - // Init config, specify appropriate 
version - config := NewConfig() - config.Version = V1_0_0_0 - config.Consumer.Return.Errors = true - - // Start with a client - client, err := NewClient([]string{"localhost:9092"}, config) - if err != nil { - panic(err) - } - defer func() { _ = client.Close() }() - - // Start a new consumer group - group, err := NewConsumerGroupFromClient("my-group", client) - if err != nil { - panic(err) - } - defer func() { _ = group.Close() }() - - // Track errors - go func() { - for err := range group.Errors() { - fmt.Println("ERROR", err) - } - }() - - // Iterate over consumer sessions. - ctx := context.Background() - for { - topics := []string{"my-topic"} - handler := exampleConsumerGroupHandler{} - - err := group.Consume(ctx, topics, handler) - if err != nil { - panic(err) - } - } -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go index 4de45e7b..e5ebdaef 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -1,5 +1,6 @@ package sarama +//ConsumerMetadataRequest is used for metadata requests type ConsumerMetadataRequest struct { ConsumerGroup string } @@ -28,6 +29,10 @@ func (r *ConsumerMetadataRequest) version() int16 { return 0 } +func (r *ConsumerMetadataRequest) headerVersion() int16 { + return 1 +} + func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go deleted file mode 100644 index 24e5f0a4..00000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - consumerMetadataRequestEmpty = []byte{ - 0x00, 0x00} - - consumerMetadataRequestString = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'} -) - -func 
TestConsumerMetadataRequest(t *testing.T) { - request := new(ConsumerMetadataRequest) - testEncodable(t, "empty string", request, consumerMetadataRequestEmpty) - testVersionDecodable(t, "empty string", request, consumerMetadataRequestEmpty, 0) - - request.ConsumerGroup = "foobar" - testEncodable(t, "with string", request, consumerMetadataRequestString) - testVersionDecodable(t, "with string", request, consumerMetadataRequestString, 0) -} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go index 442cbde7..1b5d00d2 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -5,6 +5,7 @@ import ( "strconv" ) +//ConsumerMetadataResponse holds the response for a consumer group meta data requests type ConsumerMetadataResponse struct { Err KError Coordinator *Broker @@ -72,6 +73,10 @@ func (r *ConsumerMetadataResponse) version() int16 { return 0 } +func (r *ConsumerMetadataResponse) headerVersion() int16 { + return 0 +} + func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go deleted file mode 100644 index 8482f6ff..00000000 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package sarama - -import "testing" - -var ( - consumerMetadataResponseError = []byte{ - 0x00, 0x0E, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - consumerMetadataResponseSuccess = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0xAB, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0xCC, 0xDD} -) - -func TestConsumerMetadataResponseError(t *testing.T) { - response := &ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress} - testEncodable(t, "", response, 
consumerMetadataResponseError) - - decodedResp := &ConsumerMetadataResponse{} - if err := versionedDecode(consumerMetadataResponseError, decodedResp, 0); err != nil { - t.Error("could not decode: ", err) - } - - if decodedResp.Err != ErrOffsetsLoadInProgress { - t.Errorf("got %s, want %s", decodedResp.Err, ErrOffsetsLoadInProgress) - } -} - -func TestConsumerMetadataResponseSuccess(t *testing.T) { - broker := NewBroker("foo:52445") - broker.id = 0xAB - response := ConsumerMetadataResponse{ - Coordinator: broker, - CoordinatorID: 0xAB, - CoordinatorHost: "foo", - CoordinatorPort: 0xCCDD, - Err: ErrNoError, - } - testResponse(t, "success", &response, consumerMetadataResponseSuccess) -} diff --git a/vendor/github.com/Shopify/sarama/consumer_test.go b/vendor/github.com/Shopify/sarama/consumer_test.go deleted file mode 100644 index 4bd66290..00000000 --- a/vendor/github.com/Shopify/sarama/consumer_test.go +++ /dev/null @@ -1,1036 +0,0 @@ -package sarama - -import ( - "log" - "os" - "os/signal" - "sync" - "testing" - "time" -) - -var testMsg = StringEncoder("Foo") - -// If a particular offset is provided then messages are consumed starting from -// that offset. -func TestConsumerOffsetManual(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - - mockFetchResponse := NewMockFetchResponse(t, 1) - for i := 0; i < 10; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg) - } - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). 
- SetOffset("my_topic", 0, OffsetNewest, 2345), - "FetchRequest": mockFetchResponse, - }) - - // When - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - consumer, err := master.ConsumePartition("my_topic", 0, 1234) - if err != nil { - t.Fatal(err) - } - - // Then: messages starting from offset 1234 are consumed. - for i := 0; i < 10; i++ { - select { - case message := <-consumer.Messages(): - assertMessageOffset(t, message, int64(i+1234)) - case err := <-consumer.Errors(): - t.Error(err) - } - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// If `OffsetNewest` is passed as the initial offset then the first consumed -// message is indeed corresponds to the offset that broker claims to be the -// newest in its metadata response. -func TestConsumerOffsetNewest(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 10). - SetOffset("my_topic", 0, OffsetOldest, 7), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 9, testMsg). - SetMessage("my_topic", 0, 10, testMsg). - SetMessage("my_topic", 0, 11, testMsg). 
- SetHighWaterMark("my_topic", 0, 14), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) - if err != nil { - t.Fatal(err) - } - - // Then - assertMessageOffset(t, <-consumer.Messages(), 10) - if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 { - t.Errorf("Expected high water mark offset 14, found %d", hwmo) - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// It is possible to close a partition consumer and create the same anew. -func TestConsumerRecreate(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 10, testMsg), - }) - - c, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, 10) - if err != nil { - t.Fatal(err) - } - assertMessageOffset(t, <-pc.Messages(), 10) - - // When - safeClose(t, pc) - pc, err = c.ConsumePartition("my_topic", 0, 10) - if err != nil { - t.Fatal(err) - } - - // Then - assertMessageOffset(t, <-pc.Messages(), 10) - - safeClose(t, pc) - safeClose(t, c) - broker0.Close() -} - -// An attempt to consume the same partition twice should fail. -func TestConsumerDuplicate(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). 
- SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc1, err := c.ConsumePartition("my_topic", 0, 0) - if err != nil { - t.Fatal(err) - } - - // When - pc2, err := c.ConsumePartition("my_topic", 0, 0) - - // Then - if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") { - t.Fatal("A partition cannot be consumed twice at the same time") - } - - safeClose(t, pc1) - safeClose(t, c) - broker0.Close() -} - -// If consumer fails to refresh metadata it keeps retrying with frequency -// specified by `Config.Consumer.Retry.Backoff`. -func TestConsumerLeaderRefreshError(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 100) - - // Stage 1: my_topic/0 served by broker0 - Logger.Printf(" STAGE 1") - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 123). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1). 
- SetMessage("my_topic", 0, 123, testMsg), - }) - - config := NewConfig() - config.Net.ReadTimeout = 100 * time.Millisecond - config.Consumer.Retry.Backoff = 200 * time.Millisecond - config.Consumer.Return.Errors = true - config.Metadata.Retry.Max = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-pc.Messages(), 123) - - // Stage 2: broker0 says that it is no longer the leader for my_topic/0, - // but the requests to retrieve metadata fail with network timeout. - Logger.Printf(" STAGE 2") - - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockWrapper(fetchResponse2), - }) - - if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { - t.Errorf("Unexpected error: %v", consErr.Err) - } - - // Stage 3: finally the metadata returned by broker0 tells that broker1 is - // a new leader for my_topic/0. Consumption resumes. - - Logger.Printf(" STAGE 3") - - broker1 := NewMockBroker(t, 101) - - broker1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 124, testMsg), - }) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetLeader("my_topic", 0, broker1.BrokerID()), - }) - - assertMessageOffset(t, <-pc.Messages(), 124) - - safeClose(t, pc) - safeClose(t, c) - broker1.Close() - broker0.Close() -} - -func TestConsumerInvalidTopic(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 100) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). 
- SetBroker(broker0.Addr(), broker0.BrokerID()), - }) - - c, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - - // Then - if pc != nil || err != ErrUnknownTopicOrPartition { - t.Errorf("Should fail with, err=%v", err) - } - - safeClose(t, c) - broker0.Close() -} - -// Nothing bad happens if a partition consumer that has no leader assigned at -// the moment is closed. -func TestConsumerClosePartitionWithoutLeader(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 100) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 123). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 123, testMsg), - }) - - config := NewConfig() - config.Net.ReadTimeout = 100 * time.Millisecond - config.Consumer.Retry.Backoff = 100 * time.Millisecond - config.Consumer.Return.Errors = true - config.Metadata.Retry.Max = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-pc.Messages(), 123) - - // broker0 says that it is no longer the leader for my_topic/0, but the - // requests to retrieve metadata fail with network timeout. 
- fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockWrapper(fetchResponse2), - }) - - // When - if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { - t.Errorf("Unexpected error: %v", consErr.Err) - } - - // Then: the partition consumer can be closed without any problem. - safeClose(t, pc) - safeClose(t, c) - broker0.Close() -} - -// If the initial offset passed on partition consumer creation is out of the -// actual offset range for the partition, then the partition consumer stops -// immediately closing its output channels. -func TestConsumerShutsDownOutOfRange(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - fetchResponse := new(FetchResponse) - fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 7), - "FetchRequest": NewMockWrapper(fetchResponse), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 101) - if err != nil { - t.Fatal(err) - } - - // Then: consumer should shut down closing its messages and errors channels. - if _, ok := <-consumer.Messages(); ok { - t.Error("Expected the consumer to shut down") - } - safeClose(t, consumer) - - safeClose(t, master) - broker0.Close() -} - -// If a fetch response contains messages with offsets that are smaller then -// requested, then such messages are ignored. 
-func TestConsumerExtraOffsets(t *testing.T) { - // Given - legacyFetchResponse := &FetchResponse{} - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1) - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2) - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3) - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4) - newFetchResponse := &FetchResponse{Version: 4} - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1) - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2) - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3) - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4) - newFetchResponse.SetLastOffsetDelta("my_topic", 0, 4) - newFetchResponse.SetLastStableOffset("my_topic", 0, 4) - for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} { - var offsetResponseVersion int16 - cfg := NewConfig() - cfg.Consumer.Return.Errors = true - if fetchResponse1.Version >= 4 { - cfg.Version = V0_11_0_0 - offsetResponseVersion = 1 - } - - broker0 := NewMockBroker(t, 0) - fetchResponse2 := &FetchResponse{} - fetchResponse2.Version = fetchResponse1.Version - fetchResponse2.AddError("my_topic", 0, ErrNoError) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetVersion(offsetResponseVersion). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0), - "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, cfg) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 3) - if err != nil { - t.Fatal(err) - } - - // Then: messages with offsets 1 and 2 are not returned even though they - // are present in the response. 
- select { - case msg := <-consumer.Messages(): - assertMessageOffset(t, msg, 3) - case err := <-consumer.Errors(): - t.Fatal(err) - } - - select { - case msg := <-consumer.Messages(): - assertMessageOffset(t, msg, 4) - case err := <-consumer.Errors(): - t.Fatal(err) - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() - } -} - -// In some situations broker may return a block containing only -// messages older then requested, even though there would be -// more messages if higher offset was requested. -func TestConsumerReceivingFetchResponseWithTooOldRecords(t *testing.T) { - // Given - fetchResponse1 := &FetchResponse{Version: 4} - fetchResponse1.AddRecord("my_topic", 0, nil, testMsg, 1) - - fetchResponse2 := &FetchResponse{Version: 4} - fetchResponse2.AddRecord("my_topic", 0, nil, testMsg, 1000000) - - cfg := NewConfig() - cfg.Consumer.Return.Errors = true - cfg.Version = V1_1_0_0 - - broker0 := NewMockBroker(t, 0) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetVersion(1). - SetOffset("my_topic", 0, OffsetNewest, 1234). 
- SetOffset("my_topic", 0, OffsetOldest, 0), - "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, cfg) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 2) - if err != nil { - t.Fatal(err) - } - - select { - case msg := <-consumer.Messages(): - assertMessageOffset(t, msg, 1000000) - case err := <-consumer.Errors(): - t.Fatal(err) - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) { - // Given - fetchResponse1 := &FetchResponse{Version: 4} - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) - - cfg := NewConfig() - cfg.Version = V0_11_0_0 - - broker0 := NewMockBroker(t, 0) - fetchResponse2 := &FetchResponse{} - fetchResponse2.Version = 4 - fetchResponse2.AddError("my_topic", 0, ErrNoError) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetVersion(1). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0), - "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, cfg) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 1) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-consumer.Messages(), 1) - assertMessageOffset(t, <-consumer.Messages(), 2) - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// It is fine if offsets of fetched messages are not sequential (although -// strictly increasing!). 
-func TestConsumerNonSequentialOffsets(t *testing.T) { - // Given - legacyFetchResponse := &FetchResponse{} - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5) - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7) - legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11) - newFetchResponse := &FetchResponse{Version: 4} - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5) - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7) - newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11) - newFetchResponse.SetLastOffsetDelta("my_topic", 0, 11) - newFetchResponse.SetLastStableOffset("my_topic", 0, 11) - for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} { - var offsetResponseVersion int16 - cfg := NewConfig() - if fetchResponse1.Version >= 4 { - cfg.Version = V0_11_0_0 - offsetResponseVersion = 1 - } - - broker0 := NewMockBroker(t, 0) - fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version} - fetchResponse2.AddError("my_topic", 0, ErrNoError) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetVersion(offsetResponseVersion). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0), - "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, cfg) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 3) - if err != nil { - t.Fatal(err) - } - - // Then: messages with offsets 1 and 2 are not returned even though they - // are present in the response. 
- assertMessageOffset(t, <-consumer.Messages(), 5) - assertMessageOffset(t, <-consumer.Messages(), 7) - assertMessageOffset(t, <-consumer.Messages(), 11) - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() - } -} - -// If leadership for a partition is changing then consumer resolves the new -// leader and switches to it. -func TestConsumerRebalancingMultiplePartitions(t *testing.T) { - // initial setup - seedBroker := NewMockBroker(t, 10) - leader0 := NewMockBroker(t, 0) - leader1 := NewMockBroker(t, 1) - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(leader0.Addr(), leader0.BrokerID()). - SetBroker(leader1.Addr(), leader1.BrokerID()). - SetLeader("my_topic", 0, leader0.BrokerID()). - SetLeader("my_topic", 1, leader1.BrokerID()), - }) - - mockOffsetResponse1 := NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000). - SetOffset("my_topic", 1, OffsetOldest, 0). 
- SetOffset("my_topic", 1, OffsetNewest, 1000) - leader0.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse1, - "FetchRequest": NewMockFetchResponse(t, 1), - }) - leader1.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse1, - "FetchRequest": NewMockFetchResponse(t, 1), - }) - - // launch test goroutines - config := NewConfig() - config.Consumer.Retry.Backoff = 50 - master, err := NewConsumer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // we expect to end up (eventually) consuming exactly ten messages on each partition - var wg sync.WaitGroup - for i := int32(0); i < 2; i++ { - consumer, err := master.ConsumePartition("my_topic", i, 0) - if err != nil { - t.Error(err) - } - - go func(c PartitionConsumer) { - for err := range c.Errors() { - t.Error(err) - } - }(consumer) - - wg.Add(1) - go func(partition int32, c PartitionConsumer) { - for i := 0; i < 10; i++ { - message := <-consumer.Messages() - if message.Offset != int64(i) { - t.Error("Incorrect message offset!", i, partition, message.Offset) - } - if message.Partition != partition { - t.Error("Incorrect message partition!") - } - } - safeClose(t, consumer) - wg.Done() - }(i, consumer) - } - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 1") - // Stage 1: - // * my_topic/0 -> leader0 serves 4 messages - // * my_topic/1 -> leader1 serves 0 messages - - mockFetchResponse := NewMockFetchResponse(t, 1) - for i := 0; i < 4; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) - } - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse, - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 2") - // Stage 2: - // * leader0 says that it is no longer serving my_topic/0 - // * seedBroker tells that leader1 is serving my_topic/0 now - - // seed broker tells that the new partition 0 leader is leader1 - seedBroker.SetHandlerByMap(map[string]MockResponse{ - 
"MetadataRequest": NewMockMetadataResponse(t). - SetLeader("my_topic", 0, leader1.BrokerID()). - SetLeader("my_topic", 1, leader1.BrokerID()), - }) - - // leader0 says no longer leader of partition 0 - fetchResponse := new(FetchResponse) - fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition) - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockWrapper(fetchResponse), - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 3") - // Stage 3: - // * my_topic/0 -> leader1 serves 3 messages - // * my_topic/1 -> leader1 server 8 messages - - // leader1 provides 3 message on partition 0, and 8 messages on partition 1 - mockFetchResponse2 := NewMockFetchResponse(t, 2) - for i := 4; i < 7; i++ { - mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg) - } - for i := 0; i < 8; i++ { - mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg) - } - leader1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse2, - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 4") - // Stage 4: - // * my_topic/0 -> leader1 serves 3 messages - // * my_topic/1 -> leader1 tells that it is no longer the leader - // * seedBroker tells that leader0 is a new leader for my_topic/1 - - // metadata assigns 0 to leader1 and 1 to leader0 - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetLeader("my_topic", 0, leader1.BrokerID()). - SetLeader("my_topic", 1, leader0.BrokerID()), - }) - - // leader1 provides three more messages on partition0, says no longer leader of partition1 - mockFetchResponse3 := NewMockFetchResponse(t, 3). - SetMessage("my_topic", 0, int64(7), testMsg). - SetMessage("my_topic", 0, int64(8), testMsg). 
- SetMessage("my_topic", 0, int64(9), testMsg) - fetchResponse4 := new(FetchResponse) - fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition) - leader1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4), - }) - - // leader0 provides two messages on partition 1 - mockFetchResponse4 := NewMockFetchResponse(t, 2) - for i := 8; i < 10; i++ { - mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg) - } - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse4, - }) - - wg.Wait() - safeClose(t, master) - leader1.Close() - leader0.Close() - seedBroker.Close() -} - -// When two partitions have the same broker as the leader, if one partition -// consumer channel buffer is full then that does not affect the ability to -// read messages by the other consumer. -func TestConsumerInterleavedClose(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - SetLeader("my_topic", 1, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 1000). - SetOffset("my_topic", 0, OffsetNewest, 1100). - SetOffset("my_topic", 1, OffsetOldest, 2000). - SetOffset("my_topic", 1, OffsetNewest, 2100), - "FetchRequest": NewMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 1000, testMsg). - SetMessage("my_topic", 0, 1001, testMsg). - SetMessage("my_topic", 0, 1002, testMsg). 
- SetMessage("my_topic", 1, 2000, testMsg), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - master, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - c0, err := master.ConsumePartition("my_topic", 0, 1000) - if err != nil { - t.Fatal(err) - } - - c1, err := master.ConsumePartition("my_topic", 1, 2000) - if err != nil { - t.Fatal(err) - } - - // When/Then: we can read from partition 0 even if nobody reads from partition 1 - assertMessageOffset(t, <-c0.Messages(), 1000) - assertMessageOffset(t, <-c0.Messages(), 1001) - assertMessageOffset(t, <-c0.Messages(), 1002) - - safeClose(t, c1) - safeClose(t, c0) - safeClose(t, master) - broker0.Close() -} - -func TestConsumerBounceWithReferenceOpen(t *testing.T) { - broker0 := NewMockBroker(t, 0) - broker0Addr := broker0.Addr() - broker1 := NewMockBroker(t, 1) - - mockMetadataResponse := NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - SetLeader("my_topic", 1, broker1.BrokerID()) - - mockOffsetResponse := NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 1000). - SetOffset("my_topic", 0, OffsetNewest, 1100). - SetOffset("my_topic", 1, OffsetOldest, 2000). 
- SetOffset("my_topic", 1, OffsetNewest, 2100) - - mockFetchResponse := NewMockFetchResponse(t, 1) - for i := 0; i < 10; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg) - mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg) - } - - broker0.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse, - "FetchRequest": mockFetchResponse, - }) - broker1.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": mockMetadataResponse, - "OffsetRequest": mockOffsetResponse, - "FetchRequest": mockFetchResponse, - }) - - config := NewConfig() - config.Consumer.Return.Errors = true - config.Consumer.Retry.Backoff = 100 * time.Millisecond - config.ChannelBufferSize = 1 - master, err := NewConsumer([]string{broker1.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - c0, err := master.ConsumePartition("my_topic", 0, 1000) - if err != nil { - t.Fatal(err) - } - - c1, err := master.ConsumePartition("my_topic", 1, 2000) - if err != nil { - t.Fatal(err) - } - - // read messages from both partition to make sure that both brokers operate - // normally. - assertMessageOffset(t, <-c0.Messages(), 1000) - assertMessageOffset(t, <-c1.Messages(), 2000) - - // Simulate broker shutdown. Note that metadata response does not change, - // that is the leadership does not move to another broker. So partition - // consumer will keep retrying to restore the connection with the broker. - broker0.Close() - - // Make sure that while the partition/0 leader is down, consumer/partition/1 - // is capable of pulling messages from broker1. - for i := 1; i < 7; i++ { - offset := (<-c1.Messages()).Offset - if offset != int64(2000+i) { - t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i)) - } - } - - // Bring broker0 back to service. 
- broker0 = NewMockBrokerAddr(t, 0, broker0Addr) - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse, - }) - - // Read the rest of messages from both partitions. - for i := 7; i < 10; i++ { - assertMessageOffset(t, <-c1.Messages(), int64(2000+i)) - } - for i := 1; i < 10; i++ { - assertMessageOffset(t, <-c0.Messages(), int64(1000+i)) - } - - select { - case <-c0.Errors(): - default: - t.Errorf("Partition consumer should have detected broker restart") - } - - safeClose(t, c1) - safeClose(t, c0) - safeClose(t, master) - broker0.Close() - broker1.Close() -} - -func TestConsumerOffsetOutOfRange(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 2) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 2345), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When/Then - if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - - safeClose(t, master) - broker0.Close() -} - -func TestConsumerExpiryTicker(t *testing.T) { - // Given - broker0 := NewMockBroker(t, 0) - fetchResponse1 := &FetchResponse{} - for i := 1; i <= 8; i++ { - fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i)) - } - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": NewMockMetadataResponse(t). 
- SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": NewMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 1), - "FetchRequest": NewMockSequence(fetchResponse1), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - config.Consumer.MaxProcessingTime = 10 * time.Millisecond - master, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 1) - if err != nil { - t.Fatal(err) - } - - // Then: messages with offsets 1 through 8 are read - for i := 1; i <= 8; i++ { - assertMessageOffset(t, <-consumer.Messages(), int64(i)) - time.Sleep(2 * time.Millisecond) - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { - if msg.Offset != expectedOffset { - t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) - } -} - -// This example shows how to use the consumer to read messages -// from a single partition. -func ExampleConsumer() { - consumer, err := NewConsumer([]string{"localhost:9092"}, nil) - if err != nil { - panic(err) - } - - defer func() { - if err := consumer.Close(); err != nil { - log.Fatalln(err) - } - }() - - partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest) - if err != nil { - panic(err) - } - - defer func() { - if err := partitionConsumer.Close(); err != nil { - log.Fatalln(err) - } - }() - - // Trap SIGINT to trigger a shutdown. 
- signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - consumed := 0 -ConsumerLoop: - for { - select { - case msg := <-partitionConsumer.Messages(): - log.Printf("Consumed message offset %d\n", msg.Offset) - consumed++ - case <-signals: - break ConsumerLoop - } - } - - log.Printf("Consumed: %d\n", consumed) -} diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go new file mode 100644 index 00000000..9b75ab53 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/control_record.go @@ -0,0 +1,72 @@ +package sarama + +//ControlRecordType ... +type ControlRecordType int + +const ( + //ControlRecordAbort is a control record for abort + ControlRecordAbort ControlRecordType = iota + //ControlRecordCommit is a control record for commit + ControlRecordCommit + //ControlRecordUnknown is a control record of unknown type + ControlRecordUnknown +) + +// Control records are returned as a record by fetchRequest +// However unlike "normal" records, they mean nothing application wise. +// They only serve internal logic for supporting transactions. +type ControlRecord struct { + Version int16 + CoordinatorEpoch int32 + Type ControlRecordType +} + +func (cr *ControlRecord) decode(key, value packetDecoder) error { + var err error + cr.Version, err = value.getInt16() + if err != nil { + return err + } + + cr.CoordinatorEpoch, err = value.getInt32() + if err != nil { + return err + } + + // There a version for the value part AND the key part. 
And I have no idea if they are supposed to match or not + // Either way, all these version can only be 0 for now + cr.Version, err = key.getInt16() + if err != nil { + return err + } + + recordType, err := key.getInt16() + if err != nil { + return err + } + + switch recordType { + case 0: + cr.Type = ControlRecordAbort + case 1: + cr.Type = ControlRecordCommit + default: + // from JAVA implementation: + // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored + cr.Type = ControlRecordUnknown + } + return nil +} + +func (cr *ControlRecord) encode(key, value packetEncoder) { + value.putInt16(cr.Version) + value.putInt32(cr.CoordinatorEpoch) + key.putInt16(cr.Version) + + switch cr.Type { + case ControlRecordAbort: + key.putInt16(0) + case ControlRecordCommit: + key.putInt16(1) + } +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go index 1f144431..38189a3c 100644 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "fmt" "hash/crc32" + "sync" ) type crcPolynomial int8 @@ -13,6 +14,22 @@ const ( crcCastagnoli ) +var crc32FieldPool = sync.Pool{} + +func acquireCrc32Field(polynomial crcPolynomial) *crc32Field { + val := crc32FieldPool.Get() + if val != nil { + c := val.(*crc32Field) + c.polynomial = polynomial + return c + } + return newCRC32Field(polynomial) +} + +func releaseCrc32Field(c *crc32Field) { + crc32FieldPool.Put(c) +} + var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go index af321e99..46fb0440 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_request.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -67,6 +67,10 @@ func (r *CreatePartitionsRequest) version() int16 { return 0 } +func (r *CreatePartitionsRequest) headerVersion() int16 { + return 1 +} + func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request_test.go b/vendor/github.com/Shopify/sarama/create_partitions_request_test.go deleted file mode 100644 index db8cb732..00000000 --- a/vendor/github.com/Shopify/sarama/create_partitions_request_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - createPartitionRequestNoAssignment = []byte{ - 0, 0, 0, 1, // one topic - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 3, // 3 partitions - 255, 255, 255, 255, // no assignments - 0, 0, 0, 100, // timeout - 0, // validate only = false - } - - createPartitionRequestAssignment = []byte{ - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 3, // 3 partitions - 0, 0, 0, 2, - 0, 0, 0, 2, - 0, 0, 0, 2, 0, 0, 0, 3, - 0, 0, 0, 2, - 0, 0, 0, 3, 0, 0, 0, 1, - 0, 0, 0, 100, - 1, // validate only = true - } -) - -func TestCreatePartitionsRequest(t *testing.T) { - req := &CreatePartitionsRequest{ - TopicPartitions: map[string]*TopicPartition{ - "topic": &TopicPartition{ - Count: 3, - }, - }, - Timeout: 100 * time.Millisecond, - } - - buf := testRequestEncode(t, "no assignment", req, createPartitionRequestNoAssignment) - testRequestDecode(t, "no assignment", req, buf) - - req.ValidateOnly = true - req.TopicPartitions["topic"].Assignment = [][]int32{{2, 3}, {3, 1}} - - buf = testRequestEncode(t, "assignment", req, createPartitionRequestAssignment) - testRequestDecode(t, 
"assignment", req, buf) -} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go index abd621c6..12ce7885 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -1,6 +1,9 @@ package sarama -import "time" +import ( + "fmt" + "time" +) type CreatePartitionsResponse struct { ThrottleTime time.Duration @@ -60,6 +63,10 @@ func (r *CreatePartitionsResponse) version() int16 { return 0 } +func (r *CreatePartitionsResponse) headerVersion() int16 { + return 0 +} + func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } @@ -69,6 +76,14 @@ type TopicPartitionError struct { ErrMsg *string } +func (t *TopicPartitionError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + func (t *TopicPartitionError) encode(pe packetEncoder) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response_test.go b/vendor/github.com/Shopify/sarama/create_partitions_response_test.go deleted file mode 100644 index 3219882c..00000000 --- a/vendor/github.com/Shopify/sarama/create_partitions_response_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" - "time" -) - -var ( - createPartitionResponseSuccess = []byte{ - 0, 0, 0, 100, // throttleTimeMs - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, // no error - 255, 255, // no error message - } - - createPartitionResponseFail = []byte{ - 0, 0, 0, 100, // throttleTimeMs - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 37, // partition error - 0, 5, 'e', 'r', 'r', 'o', 'r', - } -) - -func TestCreatePartitionsResponse(t *testing.T) { - resp := &CreatePartitionsResponse{ - ThrottleTime: 100 * time.Millisecond, - TopicPartitionErrors: map[string]*TopicPartitionError{ - "topic": 
&TopicPartitionError{}, - }, - } - - testResponse(t, "success", resp, createPartitionResponseSuccess) - decodedresp := new(CreatePartitionsResponse) - testVersionDecodable(t, "success", decodedresp, createPartitionResponseSuccess, 0) - if !reflect.DeepEqual(decodedresp, resp) { - t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp) - } - - errMsg := "error" - resp.TopicPartitionErrors["topic"].Err = ErrInvalidPartitions - resp.TopicPartitionErrors["topic"].ErrMsg = &errMsg - - testResponse(t, "with errors", resp, createPartitionResponseFail) - decodedresp = new(CreatePartitionsResponse) - testVersionDecodable(t, "with errors", decodedresp, createPartitionResponseFail, 0) - if !reflect.DeepEqual(decodedresp, resp) { - t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp) - } -} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go index 709c0a44..287acd06 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_request.go +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -79,6 +79,10 @@ func (c *CreateTopicsRequest) version() int16 { return c.Version } +func (r *CreateTopicsRequest) headerVersion() int16 { + return 1 +} + func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { switch c.Version { case 2: diff --git a/vendor/github.com/Shopify/sarama/create_topics_request_test.go b/vendor/github.com/Shopify/sarama/create_topics_request_test.go deleted file mode 100644 index 56b1b80e..00000000 --- a/vendor/github.com/Shopify/sarama/create_topics_request_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - createTopicsRequestV0 = []byte{ - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 255, 255, 255, 255, - 255, 255, - 0, 0, 0, 1, // 1 replica assignment - 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, - 0, 0, 0, 1, // 1 config - 0, 12, 'r', 'e', 't', 'e', 'n', 't', 'i', 
'o', 'n', '.', 'm', 's', - 0, 2, '-', '1', - 0, 0, 0, 100, - } - - createTopicsRequestV1 = append(createTopicsRequestV0, byte(1)) -) - -func TestCreateTopicsRequest(t *testing.T) { - retention := "-1" - - req := &CreateTopicsRequest{ - TopicDetails: map[string]*TopicDetail{ - "topic": { - NumPartitions: -1, - ReplicationFactor: -1, - ReplicaAssignment: map[int32][]int32{ - 0: []int32{0, 1, 2}, - }, - ConfigEntries: map[string]*string{ - "retention.ms": &retention, - }, - }, - }, - Timeout: 100 * time.Millisecond, - } - - testRequest(t, "version 0", req, createTopicsRequestV0) - - req.Version = 1 - req.ValidateOnly = true - - testRequest(t, "version 1", req, createTopicsRequestV1) -} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go index 66207e00..7e1448a6 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -1,6 +1,9 @@ package sarama -import "time" +import ( + "fmt" + "time" +) type CreateTopicsResponse struct { Version int16 @@ -67,6 +70,10 @@ func (c *CreateTopicsResponse) version() int16 { return c.Version } +func (c *CreateTopicsResponse) headerVersion() int16 { + return 0 +} + func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { switch c.Version { case 2: @@ -83,6 +90,14 @@ type TopicError struct { ErrMsg *string } +func (t *TopicError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + func (t *TopicError) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/create_topics_response_test.go b/vendor/github.com/Shopify/sarama/create_topics_response_test.go deleted file mode 100644 index 53790064..00000000 --- a/vendor/github.com/Shopify/sarama/create_topics_response_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package sarama - -import ( - 
"testing" - "time" -) - -var ( - createTopicsResponseV0 = []byte{ - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 42, - } - - createTopicsResponseV1 = []byte{ - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 42, - 0, 3, 'm', 's', 'g', - } - - createTopicsResponseV2 = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 42, - 0, 3, 'm', 's', 'g', - } -) - -func TestCreateTopicsResponse(t *testing.T) { - resp := &CreateTopicsResponse{ - TopicErrors: map[string]*TopicError{ - "topic": &TopicError{ - Err: ErrInvalidRequest, - }, - }, - } - - testResponse(t, "version 0", resp, createTopicsResponseV0) - - resp.Version = 1 - msg := "msg" - resp.TopicErrors["topic"].ErrMsg = &msg - - testResponse(t, "version 1", resp, createTopicsResponseV1) - - resp.Version = 2 - resp.ThrottleTime = 100 * time.Millisecond - - testResponse(t, "version 2", resp, createTopicsResponseV2) -} diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go new file mode 100644 index 00000000..e4dc3c18 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -0,0 +1,63 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + reader *gzip.Reader + readerIntf = gzipReaderPool.Get() + ) + if readerIntf != nil { + reader = readerIntf.(*gzip.Reader) + } else { + reader, err = gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + } + + defer gzipReaderPool.Put(reader) + + if err := reader.Reset(bytes.NewReader(data)); err != nil { + return nil, err + } + + return 
ioutil.ReadAll(reader) + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader := lz4ReaderPool.Get().(*lz4.Reader) + defer lz4ReaderPool.Put(reader) + + reader.Reset(bytes.NewReader(data)) + return ioutil.ReadAll(reader) + case CompressionZSTD: + return zstdDecompress(nil, data) + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go index 305a324a..4ac8bbee 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_request.go +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -21,6 +21,10 @@ func (r *DeleteGroupsRequest) version() int16 { return 0 } +func (r *DeleteGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { return V1_1_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request_test.go b/vendor/github.com/Shopify/sarama/delete_groups_request_test.go deleted file mode 100644 index 90817249..00000000 --- a/vendor/github.com/Shopify/sarama/delete_groups_request_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyDeleteGroupsRequest = []byte{0, 0, 0, 0} - - singleDeleteGroupsRequest = []byte{ - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name: foo - } - - doubleDeleteGroupsRequest = []byte{ - 0, 0, 0, 2, // 2 groups - 0, 3, 'f', 'o', 'o', // group name: foo - 0, 3, 'b', 'a', 'r', // group name: foo - } -) - -func TestDeleteGroupsRequest(t *testing.T) { - var request *DeleteGroupsRequest - - request = new(DeleteGroupsRequest) - testRequest(t, "no groups", request, emptyDeleteGroupsRequest) - - request = new(DeleteGroupsRequest) - request.AddGroup("foo") - testRequest(t, "one group", request, singleDeleteGroupsRequest) - - request = new(DeleteGroupsRequest) - request.AddGroup("foo") - 
request.AddGroup("bar") - testRequest(t, "two groups", request, doubleDeleteGroupsRequest) -} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go index c067ebb4..5e7b1ed3 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_response.go +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -65,6 +65,10 @@ func (r *DeleteGroupsResponse) version() int16 { return 0 } +func (r *DeleteGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { return V1_1_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response_test.go b/vendor/github.com/Shopify/sarama/delete_groups_response_test.go deleted file mode 100644 index 6f622b5f..00000000 --- a/vendor/github.com/Shopify/sarama/delete_groups_response_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - emptyDeleteGroupsResponse = []byte{ - 0, 0, 0, 0, // does not violate any quota - 0, 0, 0, 0, // no groups - } - - errorDeleteGroupsResponse = []byte{ - 0, 0, 0, 0, // does not violate any quota - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name - 0, 31, // error ErrClusterAuthorizationFailed - } - - noErrorDeleteGroupsResponse = []byte{ - 0, 0, 0, 0, // does not violate any quota - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name - 0, 0, // no error - } -) - -func TestDeleteGroupsResponse(t *testing.T) { - var response *DeleteGroupsResponse - - response = new(DeleteGroupsResponse) - testVersionDecodable(t, "empty", response, emptyDeleteGroupsResponse, 0) - if response.ThrottleTime != 0 { - t.Error("Expected no violation") - } - if len(response.GroupErrorCodes) != 0 { - t.Error("Expected no groups") - } - - response = new(DeleteGroupsResponse) - testVersionDecodable(t, "error", response, errorDeleteGroupsResponse, 0) - if response.ThrottleTime != 0 { - t.Error("Expected no violation") - } - 
if response.GroupErrorCodes["foo"] != ErrClusterAuthorizationFailed { - t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"]) - } - - response = new(DeleteGroupsResponse) - testVersionDecodable(t, "no error", response, noErrorDeleteGroupsResponse, 0) - if response.ThrottleTime != 0 { - t.Error("Expected no violation") - } - if response.GroupErrorCodes["foo"] != ErrNoError { - t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"]) - } -} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go index 93efafd4..dc106b17 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_request.go +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -77,6 +77,10 @@ func (d *DeleteRecordsRequest) version() int16 { return 0 } +func (d *DeleteRecordsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_records_request_test.go b/vendor/github.com/Shopify/sarama/delete_records_request_test.go deleted file mode 100644 index c72960cf..00000000 --- a/vendor/github.com/Shopify/sarama/delete_records_request_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var deleteRecordsRequest = []byte{ - 0, 0, 0, 2, - 0, 5, 'o', 't', 'h', 'e', 'r', - 0, 0, 0, 0, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 2, - 0, 0, 0, 19, - 0, 0, 0, 0, 0, 0, 0, 200, - 0, 0, 0, 20, - 0, 0, 0, 0, 0, 0, 0, 190, - 0, 0, 0, 100, -} - -func TestDeleteRecordsRequest(t *testing.T) { - req := &DeleteRecordsRequest{ - Topics: map[string]*DeleteRecordsRequestTopic{ - "topic": { - PartitionOffsets: map[int32]int64{ - 19: 200, - 20: 190, - }, - }, - "other": {}, - }, - Timeout: 100 * time.Millisecond, - } - - testRequest(t, "", req, deleteRecordsRequest) -} diff --git 
a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go index 733a58b6..d530b4c7 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_response.go +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -80,6 +80,10 @@ func (d *DeleteRecordsResponse) version() int16 { return 0 } +func (d *DeleteRecordsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_records_response_test.go b/vendor/github.com/Shopify/sarama/delete_records_response_test.go deleted file mode 100644 index 3653cdc4..00000000 --- a/vendor/github.com/Shopify/sarama/delete_records_response_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var deleteRecordsResponse = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 2, - 0, 5, 'o', 't', 'h', 'e', 'r', - 0, 0, 0, 0, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 2, - 0, 0, 0, 19, - 0, 0, 0, 0, 0, 0, 0, 200, - 0, 0, - 0, 0, 0, 20, - 255, 255, 255, 255, 255, 255, 255, 255, - 0, 3, -} - -func TestDeleteRecordsResponse(t *testing.T) { - resp := &DeleteRecordsResponse{ - Version: 0, - ThrottleTime: 100 * time.Millisecond, - Topics: map[string]*DeleteRecordsResponseTopic{ - "topic": { - Partitions: map[int32]*DeleteRecordsResponsePartition{ - 19: {LowWatermark: 200, Err: 0}, - 20: {LowWatermark: -1, Err: 3}, - }, - }, - "other": {}, - }, - } - - testResponse(t, "", resp, deleteRecordsResponse) -} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go index 911f67d3..ba6780a8 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_request.go +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -38,6 +38,10 @@ func (d *DeleteTopicsRequest) version() int16 { return d.Version } +func (d *DeleteTopicsRequest) headerVersion() 
int16 { + return 1 +} + func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request_test.go b/vendor/github.com/Shopify/sarama/delete_topics_request_test.go deleted file mode 100644 index c313a2f3..00000000 --- a/vendor/github.com/Shopify/sarama/delete_topics_request_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var deleteTopicsRequest = []byte{ - 0, 0, 0, 2, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 5, 'o', 't', 'h', 'e', 'r', - 0, 0, 0, 100, -} - -func TestDeleteTopicsRequestV0(t *testing.T) { - req := &DeleteTopicsRequest{ - Version: 0, - Topics: []string{"topic", "other"}, - Timeout: 100 * time.Millisecond, - } - - testRequest(t, "", req, deleteTopicsRequest) -} - -func TestDeleteTopicsRequestV1(t *testing.T) { - req := &DeleteTopicsRequest{ - Version: 1, - Topics: []string{"topic", "other"}, - Timeout: 100 * time.Millisecond, - } - - testRequest(t, "", req, deleteTopicsRequest) -} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go index 34225460..733961a8 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_response.go +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -68,6 +68,10 @@ func (d *DeleteTopicsResponse) version() int16 { return d.Version } +func (d *DeleteTopicsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response_test.go b/vendor/github.com/Shopify/sarama/delete_topics_response_test.go deleted file mode 100644 index 516f1a3b..00000000 --- a/vendor/github.com/Shopify/sarama/delete_topics_response_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - deleteTopicsResponseV0 = []byte{ - 0, 0, 0, 1, - 0, 5, 
't', 'o', 'p', 'i', 'c', - 0, 0, - } - - deleteTopicsResponseV1 = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 1, - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, - } -) - -func TestDeleteTopicsResponse(t *testing.T) { - resp := &DeleteTopicsResponse{ - TopicErrorCodes: map[string]KError{ - "topic": ErrNoError, - }, - } - - testResponse(t, "version 0", resp, deleteTopicsResponseV0) - - resp.Version = 1 - resp.ThrottleTime = 100 * time.Millisecond - - testResponse(t, "version 1", resp, deleteTopicsResponseV1) -} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go index 7a7cffc3..d0c73528 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_request.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -1,15 +1,17 @@ package sarama +type DescribeConfigsRequest struct { + Version int16 + Resources []*ConfigResource + IncludeSynonyms bool +} + type ConfigResource struct { Type ConfigResourceType Name string ConfigNames []string } -type DescribeConfigsRequest struct { - Resources []*ConfigResource -} - func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(r.Resources)); err != nil { return err @@ -30,6 +32,10 @@ func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { } } + if r.Version >= 1 { + pe.putBool(r.IncludeSynonyms) + } + return nil } @@ -74,6 +80,14 @@ func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err er } r.Resources[i].ConfigNames = cfnames } + r.Version = version + if r.Version >= 1 { + b, err := pd.getBool() + if err != nil { + return err + } + r.IncludeSynonyms = b + } return nil } @@ -83,9 +97,20 @@ func (r *DescribeConfigsRequest) key() int16 { } func (r *DescribeConfigsRequest) version() int16 { - return 0 + return r.Version +} + +func (r *DescribeConfigsRequest) headerVersion() int16 { + return 1 } func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { - return 
V0_11_0_0 + switch r.Version { + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request_test.go b/vendor/github.com/Shopify/sarama/describe_configs_request_test.go deleted file mode 100644 index ca0fd049..00000000 --- a/vendor/github.com/Shopify/sarama/describe_configs_request_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyDescribeConfigsRequest = []byte{ - 0, 0, 0, 0, // 0 configs - } - - singleDescribeConfigsRequest = []byte{ - 0, 0, 0, 1, // 1 config - 2, // a topic - 0, 3, 'f', 'o', 'o', // topic name: foo - 0, 0, 0, 1, //1 config name - 0, 10, // 10 chars - 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', - } - - doubleDescribeConfigsRequest = []byte{ - 0, 0, 0, 2, // 2 configs - 2, // a topic - 0, 3, 'f', 'o', 'o', // topic name: foo - 0, 0, 0, 2, //2 config name - 0, 10, // 10 chars - 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', - 0, 12, // 12 chars - 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's', - 2, // a topic - 0, 3, 'b', 'a', 'r', // topic name: foo - 0, 0, 0, 1, // 1 config - 0, 10, // 10 chars - 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', - } - - singleDescribeConfigsRequestAllConfigs = []byte{ - 0, 0, 0, 1, // 1 config - 2, // a topic - 0, 3, 'f', 'o', 'o', // topic name: foo - 255, 255, 255, 255, // no configs - } -) - -func TestDescribeConfigsRequest(t *testing.T) { - var request *DescribeConfigsRequest - - request = &DescribeConfigsRequest{ - Resources: []*ConfigResource{}, - } - testRequest(t, "no requests", request, emptyDescribeConfigsRequest) - - configs := []string{"segment.ms"} - request = &DescribeConfigsRequest{ - Resources: []*ConfigResource{ - &ConfigResource{ - Type: TopicResource, - Name: "foo", - ConfigNames: configs, - }, - }, - } - - testRequest(t, "one config", request, singleDescribeConfigsRequest) - - request = &DescribeConfigsRequest{ - Resources: 
[]*ConfigResource{ - &ConfigResource{ - Type: TopicResource, - Name: "foo", - ConfigNames: []string{"segment.ms", "retention.ms"}, - }, - &ConfigResource{ - Type: TopicResource, - Name: "bar", - ConfigNames: []string{"segment.ms"}, - }, - }, - } - testRequest(t, "two configs", request, doubleDescribeConfigsRequest) - - request = &DescribeConfigsRequest{ - Resources: []*ConfigResource{ - &ConfigResource{ - Type: TopicResource, - Name: "foo", - }, - }, - } - - testRequest(t, "one topic, all configs", request, singleDescribeConfigsRequestAllConfigs) -} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go index 6e5d30e4..063ae911 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -1,8 +1,41 @@ package sarama -import "time" +import ( + "fmt" + "time" +) + +type ConfigSource int8 + +func (s ConfigSource) String() string { + switch s { + case SourceUnknown: + return "Unknown" + case SourceTopic: + return "Topic" + case SourceDynamicBroker: + return "DynamicBroker" + case SourceDynamicDefaultBroker: + return "DynamicDefaultBroker" + case SourceStaticBroker: + return "StaticBroker" + case SourceDefault: + return "Default" + } + return fmt.Sprintf("Source Invalid: %d", int(s)) +} + +const ( + SourceUnknown ConfigSource = iota + SourceTopic + SourceDynamicBroker + SourceDynamicDefaultBroker + SourceStaticBroker + SourceDefault +) type DescribeConfigsResponse struct { + Version int16 ThrottleTime time.Duration Resources []*ResourceResponse } @@ -20,7 +53,15 @@ type ConfigEntry struct { Value string ReadOnly bool Default bool + Source ConfigSource Sensitive bool + Synonyms []*ConfigSynonym +} + +type ConfigSynonym struct { + ConfigName string + ConfigValue string + Source ConfigSource } func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { @@ -30,14 +71,16 @@ func (r 
*DescribeConfigsResponse) encode(pe packetEncoder) (err error) { } for _, c := range r.Resources { - if err = c.encode(pe); err != nil { + if err = c.encode(pe, r.Version); err != nil { return err } } + return nil } func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version throttleTime, err := pd.getInt32() if err != nil { return err @@ -66,14 +109,25 @@ func (r *DescribeConfigsResponse) key() int16 { } func (r *DescribeConfigsResponse) version() int16 { + return r.Version +} + +func (r *DescribeConfigsResponse) headerVersion() int16 { return 0 } func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch r.Version { + case 1: + return V1_0_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } } -func (r *ResourceResponse) encode(pe packetEncoder) (err error) { +func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(r.ErrorCode) if err = pe.putString(r.ErrorMsg); err != nil { @@ -91,7 +145,7 @@ func (r *ResourceResponse) encode(pe packetEncoder) (err error) { } for _, c := range r.Configs { - if err = c.encode(pe); err != nil { + if err = c.encode(pe, version); err != nil { return err } } @@ -139,7 +193,7 @@ func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { return nil } -func (r *ConfigEntry) encode(pe packetEncoder) (err error) { +func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) { if err = pe.putString(r.Name); err != nil { return err } @@ -149,12 +203,32 @@ func (r *ConfigEntry) encode(pe packetEncoder) (err error) { } pe.putBool(r.ReadOnly) - pe.putBool(r.Default) - pe.putBool(r.Sensitive) + + if version <= 0 { + pe.putBool(r.Default) + pe.putBool(r.Sensitive) + } else { + pe.putInt8(int8(r.Source)) + pe.putBool(r.Sensitive) + + if err := pe.putArrayLength(len(r.Synonyms)); err != nil { + return err + } + for _, c := range r.Synonyms { + if err = c.encode(pe, version); err 
!= nil { + return err + } + } + } + return nil } +//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { + if version == 0 { + r.Source = SourceUnknown + } name, err := pd.getString() if err != nil { return err @@ -173,16 +247,81 @@ func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { } r.ReadOnly = read - de, err := pd.getBool() - if err != nil { - return err + if version == 0 { + defaultB, err := pd.getBool() + if err != nil { + return err + } + r.Default = defaultB + if defaultB { + r.Source = SourceDefault + } + } else { + source, err := pd.getInt8() + if err != nil { + return err + } + r.Source = ConfigSource(source) + r.Default = r.Source == SourceDefault } - r.Default = de sensitive, err := pd.getBool() if err != nil { return err } r.Sensitive = sensitive + + if version > 0 { + n, err := pd.getArrayLength() + if err != nil { + return err + } + r.Synonyms = make([]*ConfigSynonym, n) + + for i := 0; i < n; i++ { + s := &ConfigSynonym{} + if err := s.decode(pd, version); err != nil { + return err + } + r.Synonyms[i] = s + } + } + return nil +} + +func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) { + err = pe.putString(c.ConfigName) + if err != nil { + return err + } + + err = pe.putString(c.ConfigValue) + if err != nil { + return err + } + + pe.putInt8(int8(c.Source)) + + return nil +} + +func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error { + name, err := pd.getString() + if err != nil { + return nil + } + c.ConfigName = name + + value, err := pd.getString() + if err != nil { + return nil + } + c.ConfigValue = value + + source, err := pd.getInt8() + if err != nil { + return nil + } + c.Source = ConfigSource(source) return nil } diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response_test.go b/vendor/github.com/Shopify/sarama/describe_configs_response_test.go deleted 
file mode 100644 index e3dcbac3..00000000 --- a/vendor/github.com/Shopify/sarama/describe_configs_response_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - describeConfigsResponseEmpty = []byte{ - 0, 0, 0, 0, //throttle - 0, 0, 0, 0, // no configs - } - - describeConfigsResponsePopulated = []byte{ - 0, 0, 0, 0, //throttle - 0, 0, 0, 1, // response - 0, 0, //errorcode - 0, 0, //string - 2, // topic - 0, 3, 'f', 'o', 'o', - 0, 0, 0, 1, //configs - 0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's', - 0, 4, '1', '0', '0', '0', - 0, // ReadOnly - 0, // Default - 0, // Sensitive - } -) - -func TestDescribeConfigsResponse(t *testing.T) { - var response *DescribeConfigsResponse - - response = &DescribeConfigsResponse{ - Resources: []*ResourceResponse{}, - } - testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0) - if len(response.Resources) != 0 { - t.Error("Expected no groups") - } - - response = &DescribeConfigsResponse{ - Resources: []*ResourceResponse{ - &ResourceResponse{ - ErrorCode: 0, - ErrorMsg: "", - Type: TopicResource, - Name: "foo", - Configs: []*ConfigEntry{ - &ConfigEntry{ - Name: "segment.ms", - Value: "1000", - ReadOnly: false, - Default: false, - Sensitive: false, - }, - }, - }, - }, - } - testResponse(t, "response with error", response, describeConfigsResponsePopulated) -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go index 1fb35677..f8962da5 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 { return 0 } +func (r *DescribeGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request_test.go 
b/vendor/github.com/Shopify/sarama/describe_groups_request_test.go deleted file mode 100644 index 7d45f3fe..00000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_request_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyDescribeGroupsRequest = []byte{0, 0, 0, 0} - - singleDescribeGroupsRequest = []byte{ - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name: foo - } - - doubleDescribeGroupsRequest = []byte{ - 0, 0, 0, 2, // 2 groups - 0, 3, 'f', 'o', 'o', // group name: foo - 0, 3, 'b', 'a', 'r', // group name: foo - } -) - -func TestDescribeGroupsRequest(t *testing.T) { - var request *DescribeGroupsRequest - - request = new(DescribeGroupsRequest) - testRequest(t, "no groups", request, emptyDescribeGroupsRequest) - - request = new(DescribeGroupsRequest) - request.AddGroup("foo") - testRequest(t, "one group", request, singleDescribeGroupsRequest) - - request = new(DescribeGroupsRequest) - request.AddGroup("foo") - request.AddGroup("bar") - testRequest(t, "two groups", request, doubleDescribeGroupsRequest) -} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go index 542b3a97..bc242e42 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -43,6 +43,10 @@ func (r *DescribeGroupsResponse) version() int16 { return 0 } +func (r *DescribeGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response_test.go b/vendor/github.com/Shopify/sarama/describe_groups_response_test.go deleted file mode 100644 index dd397319..00000000 --- a/vendor/github.com/Shopify/sarama/describe_groups_response_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -var ( - 
describeGroupsResponseEmpty = []byte{ - 0, 0, 0, 0, // no groups - } - - describeGroupsResponsePopulated = []byte{ - 0, 0, 0, 2, // 2 groups - - 0, 0, // no error - 0, 3, 'f', 'o', 'o', // Group ID - 0, 3, 'b', 'a', 'r', // State - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ConsumerProtocol type - 0, 3, 'b', 'a', 'z', // Protocol name - 0, 0, 0, 1, // 1 member - 0, 2, 'i', 'd', // Member ID - 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID - 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host - 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata - 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment - - 0, 30, // ErrGroupAuthorizationFailed - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, 0, 0, - } -) - -func TestDescribeGroupsResponse(t *testing.T) { - var response *DescribeGroupsResponse - - response = new(DescribeGroupsResponse) - testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0) - if len(response.Groups) != 0 { - t.Error("Expected no groups") - } - - response = new(DescribeGroupsResponse) - testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0) - if len(response.Groups) != 2 { - t.Error("Expected two groups") - } - - group0 := response.Groups[0] - if group0.Err != ErrNoError { - t.Error("Unxpected groups[0].Err, found", group0.Err) - } - if group0.GroupId != "foo" { - t.Error("Unxpected groups[0].GroupId, found", group0.GroupId) - } - if group0.State != "bar" { - t.Error("Unxpected groups[0].State, found", group0.State) - } - if group0.ProtocolType != "consumer" { - t.Error("Unxpected groups[0].ProtocolType, found", group0.ProtocolType) - } - if group0.Protocol != "baz" { - t.Error("Unxpected groups[0].Protocol, found", group0.Protocol) - } - if len(group0.Members) != 1 { - t.Error("Unxpected groups[0].Members, found", group0.Members) - } - if group0.Members["id"].ClientId != "sarama" { - t.Error("Unxpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId) - } - if 
group0.Members["id"].ClientHost != "localhost" { - t.Error("Unxpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost) - } - if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) { - t.Error("Unxpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata) - } - if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) { - t.Error("Unxpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment) - } - - group1 := response.Groups[1] - if group1.Err != ErrGroupAuthorizationFailed { - t.Error("Unxpected groups[1].Err, found", group0.Err) - } - if len(group1.Members) != 0 { - t.Error("Unxpected groups[1].Members, found", group0.Members) - } -} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go new file mode 100644 index 00000000..c0bf04e0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go @@ -0,0 +1,87 @@ +package sarama + +// DescribeLogDirsRequest is a describe request to get partitions' log size +type DescribeLogDirsRequest struct { + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. 
+ Version int16 + + // If this is an empty array, all topics will be queried + DescribeTopics []DescribeLogDirsRequestTopic +} + +// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic +type DescribeLogDirsRequestTopic struct { + Topic string + PartitionIDs []int32 +} + +func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error { + length := len(r.DescribeTopics) + if length == 0 { + // In order to query all topics we must send null + length = -1 + } + + if err := pe.putArrayLength(length); err != nil { + return err + } + + for _, d := range r.DescribeTopics { + if err := pe.putString(d.Topic); err != nil { + return err + } + + if err := pe.putInt32Array(d.PartitionIDs); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == -1 { + n = 0 + } + + topics := make([]DescribeLogDirsRequestTopic, n) + for i := 0; i < n; i++ { + topics[i] = DescribeLogDirsRequestTopic{} + + topic, err := pd.getString() + if err != nil { + return err + } + topics[i].Topic = topic + + pIDs, err := pd.getInt32Array() + if err != nil { + return err + } + topics[i].PartitionIDs = pIDs + } + r.DescribeTopics = topics + + return nil +} + +func (r *DescribeLogDirsRequest) key() int16 { + return 35 +} + +func (r *DescribeLogDirsRequest) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go new file mode 100644 index 00000000..411da38a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go @@ -0,0 +1,229 @@ +package sarama + +import "time" + +type 
DescribeLogDirsResponse struct { + ThrottleTime time.Duration + + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. + Version int16 + + LogDirs []DescribeLogDirsResponseDirMetadata +} + +func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.LogDirs)); err != nil { + return err + } + + for _, dir := range r.LogDirs { + if err := dir.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // Decode array of DescribeLogDirsResponseDirMetadata + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n) + for i := 0; i < n; i++ { + dir := DescribeLogDirsResponseDirMetadata{} + if err := dir.decode(pd, version); err != nil { + return err + } + r.LogDirs[i] = dir + } + + return nil +} + +func (r *DescribeLogDirsResponse) key() int16 { + return 35 +} + +func (r *DescribeLogDirsResponse) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type DescribeLogDirsResponseDirMetadata struct { + ErrorCode KError + + // The absolute log directory path + Path string + Topics []DescribeLogDirsResponseTopic +} + +func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { + pe.putInt16(int16(r.ErrorCode)) + + if err := pe.putString(r.Path); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Topics)); err != nil { + return err + } + for _, topic := range r.Topics { + if err := topic.encode(pe); err != 
nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error { + errCode, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(errCode) + + path, err := pd.getString() + if err != nil { + return err + } + r.Path = path + + // Decode array of DescribeLogDirsResponseTopic + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]DescribeLogDirsResponseTopic, n) + for i := 0; i < n; i++ { + t := DescribeLogDirsResponseTopic{} + + if err := t.decode(pd, version); err != nil { + return err + } + + r.Topics[i] = t + } + + return nil +} + +// DescribeLogDirsResponseTopic contains a topic's partitions descriptions +type DescribeLogDirsResponseTopic struct { + Topic string + Partitions []DescribeLogDirsResponsePartition +} + +func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putString(r.Topic); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Partitions)); err != nil { + return err + } + for _, partition := range r.Partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error { + t, err := pd.getString() + if err != nil { + return err + } + r.Topic = t + + n, err := pd.getArrayLength() + if err != nil { + return err + } + r.Partitions = make([]DescribeLogDirsResponsePartition, n) + for i := 0; i < n; i++ { + p := DescribeLogDirsResponsePartition{} + if err := p.decode(pd, version); err != nil { + return err + } + r.Partitions[i] = p + } + + return nil +} + +// DescribeLogDirsResponsePartition describes a partition's log directory +type DescribeLogDirsResponsePartition struct { + PartitionID int32 + + // The size of the log segments of the partition in bytes. + Size int64 + + // The lag of the log's LEO w.r.t. 
partition's HW (if it is the current log for the partition) or + // current replica's LEO (if it is the future log for the partition) + OffsetLag int64 + + // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of + // the replica in the future. + IsTemporary bool +} + +func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error { + pe.putInt32(r.PartitionID) + pe.putInt64(r.Size) + pe.putInt64(r.OffsetLag) + pe.putBool(r.IsTemporary) + + return nil +} + +func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error { + pID, err := pd.getInt32() + if err != nil { + return err + } + r.PartitionID = pID + + size, err := pd.getInt64() + if err != nil { + return err + } + r.Size = size + + lag, err := pd.getInt64() + if err != nil { + return err + } + r.OffsetLag = lag + + isTemp, err := pd.getBool() + if err != nil { + return err + } + r.IsTemporary = isTemp + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml index 97eed3ad..bfd295e9 100644 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -2,7 +2,7 @@ name: sarama up: - go: - version: '1.11' + version: '1.15.6' commands: test: diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml new file mode 100644 index 00000000..1a184fbe --- /dev/null +++ b/vendor/github.com/Shopify/sarama/docker-compose.yml @@ -0,0 +1,134 @@ +version: '3.7' +services: + zookeeper-1: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '1' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + 
zookeeper-2: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '2' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-3: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '3' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + kafka-1: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '1' + KAFKA_BROKER_RACK: '1' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-2: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' + 
KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '2' + KAFKA_BROKER_RACK: '2' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-3: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '3' + KAFKA_BROKER_RACK: '3' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-4: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + 
KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '4' + KAFKA_BROKER_RACK: '4' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-5: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.0.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '5' + KAFKA_BROKER_RACK: '5' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + toxiproxy: + image: 'shopify/toxiproxy:2.1.4' + ports: + # The tests themselves actually start the proies on these ports + - '29091:29091' + - '29092:29092' + - '29093:29093' + - '29094:29094' + - '29095:29095' + # This is the toxiproxy API port + - '8474:8474' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go index 7ce3bc0f..025bad61 100644 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -12,6 +12,11 @@ type encoder interface { encode(pe packetEncoder) error } +type encoderWithHeader interface { + encoder + headerVersion() int16 +} + // Encode takes an Encoder and turns it into bytes while potentially recording metrics. 
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go index 2cd9b506..6635425d 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_request.go +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -45,6 +45,10 @@ func (a *EndTxnRequest) version() int16 { return 0 } +func (r *EndTxnRequest) headerVersion() int16 { + return 1 +} + func (a *EndTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/end_txn_request_test.go b/vendor/github.com/Shopify/sarama/end_txn_request_test.go deleted file mode 100644 index 20e404eb..00000000 --- a/vendor/github.com/Shopify/sarama/end_txn_request_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package sarama - -import "testing" - -var ( - endTxnRequest = []byte{ - 0, 3, 't', 'x', 'n', - 0, 0, 0, 0, 0, 0, 31, 64, - 0, 1, - 1, - } -) - -func TestEndTxnRequest(t *testing.T) { - req := &EndTxnRequest{ - TransactionalID: "txn", - ProducerID: 8000, - ProducerEpoch: 1, - TransactionResult: true, - } - - testRequest(t, "", req, endTxnRequest) -} diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go index 33b27e33..76397672 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_response.go +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -39,6 +39,10 @@ func (e *EndTxnResponse) version() int16 { return 0 } +func (r *EndTxnResponse) headerVersion() int16 { + return 0 +} + func (e *EndTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/end_txn_response_test.go b/vendor/github.com/Shopify/sarama/end_txn_response_test.go deleted file mode 100644 index 41d73041..00000000 --- a/vendor/github.com/Shopify/sarama/end_txn_response_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package sarama - -import ( - "testing" - 
"time" -) - -var ( - endTxnResponse = []byte{ - 0, 0, 0, 100, - 0, 49, - } -) - -func TestEndTxnResponse(t *testing.T) { - resp := &EndTxnResponse{ - ThrottleTime: 100 * time.Millisecond, - Err: ErrInvalidProducerIDMapping, - } - - testResponse(t, "", resp, endTxnResponse) -} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index c578ef5f..5781c1c0 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -9,6 +9,9 @@ import ( // or otherwise failed to respond. var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") +// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID. +var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found") + // ErrClosedClient is the error returned when a method is called on a client that has been closed. var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") @@ -81,6 +84,44 @@ func (err ConfigurationError) Error() string { // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes type KError int16 +// MultiError is used to contain multi error. 
+type MultiError struct { + Errors *[]error +} + +func (mErr MultiError) Error() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "," + } + return errString +} + +func (mErr MultiError) PrettyError() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "\n" + } + return errString +} + +// ErrDeleteRecords is the type of error returned when fail to delete the required records +type ErrDeleteRecords struct { + MultiError +} + +func (err ErrDeleteRecords) Error() string { + return "kafka server: failed to delete records " + err.MultiError.Error() +} + +type ErrReassignPartitions struct { + MultiError +} + +func (err ErrReassignPartitions) Error() string { + return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError()) +} + // Numeric error codes returned by the Kafka server. const ( ErrNoError KError = 0 @@ -145,6 +186,28 @@ const ( ErrSASLAuthenticationFailed KError = 58 ErrUnknownProducerID KError = 59 ErrReassignmentInProgress KError = 60 + ErrDelegationTokenAuthDisabled KError = 61 + ErrDelegationTokenNotFound KError = 62 + ErrDelegationTokenOwnerMismatch KError = 63 + ErrDelegationTokenRequestNotAllowed KError = 64 + ErrDelegationTokenAuthorizationFailed KError = 65 + ErrDelegationTokenExpired KError = 66 + ErrInvalidPrincipalType KError = 67 + ErrNonEmptyGroup KError = 68 + ErrGroupIDNotFound KError = 69 + ErrFetchSessionIDNotFound KError = 70 + ErrInvalidFetchSessionEpoch KError = 71 + ErrListenerNotFound KError = 72 + ErrTopicDeletionDisabled KError = 73 + ErrFencedLeaderEpoch KError = 74 + ErrUnknownLeaderEpoch KError = 75 + ErrUnsupportedCompressionType KError = 76 + ErrStaleBrokerEpoch KError = 77 + ErrOffsetNotAvailable KError = 78 + ErrMemberIdRequired KError = 79 + ErrPreferredLeaderNotAvailable KError = 80 + ErrGroupMaxSizeReached KError = 81 + ErrFencedInstancedId KError = 82 ) func (err KError) Error() string { @@ -275,6 
+338,50 @@ func (err KError) Error() string { return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." case ErrReassignmentInProgress: return "kafka server: A partition reassignment is in progress." + case ErrDelegationTokenAuthDisabled: + return "kafka server: Delegation Token feature is not enabled." + case ErrDelegationTokenNotFound: + return "kafka server: Delegation Token is not found on server." + case ErrDelegationTokenOwnerMismatch: + return "kafka server: Specified Principal is not valid Owner/Renewer." + case ErrDelegationTokenRequestNotAllowed: + return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels." + case ErrDelegationTokenAuthorizationFailed: + return "kafka server: Delegation Token authorization failed." + case ErrDelegationTokenExpired: + return "kafka server: Delegation Token is expired." + case ErrInvalidPrincipalType: + return "kafka server: Supplied principalType is not supported." + case ErrNonEmptyGroup: + return "kafka server: The group is not empty." + case ErrGroupIDNotFound: + return "kafka server: The group id does not exist." + case ErrFetchSessionIDNotFound: + return "kafka server: The fetch session ID was not found." + case ErrInvalidFetchSessionEpoch: + return "kafka server: The fetch session epoch is invalid." + case ErrListenerNotFound: + return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed." + case ErrTopicDeletionDisabled: + return "kafka server: Topic deletion is disabled." + case ErrFencedLeaderEpoch: + return "kafka server: The leader epoch in the request is older than the epoch on the broker." + case ErrUnknownLeaderEpoch: + return "kafka server: The leader epoch in the request is newer than the epoch on the broker." 
+ case ErrUnsupportedCompressionType: + return "kafka server: The requesting client does not support the compression type of given partition." + case ErrStaleBrokerEpoch: + return "kafka server: Broker epoch has changed" + case ErrOffsetNotAvailable: + return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing" + case ErrMemberIdRequired: + return "kafka server: The group member needs to have a valid member id before actually entering a consumer group" + case ErrPreferredLeaderNotAvailable: + return "kafka server: The preferred leader was not available" + case ErrGroupMaxSizeReached: + return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members." + case ErrFencedInstancedId: + return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id." } return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/examples/README.md b/vendor/github.com/Shopify/sarama/examples/README.md deleted file mode 100644 index 85fecefd..00000000 --- a/vendor/github.com/Shopify/sarama/examples/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Sarama examples - -This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama) - -In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version. 
- -#### HTTP server - -[http_server](./http_server) is a simple HTTP server uses both the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both. diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore b/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore deleted file mode 100644 index 9f6ed425..00000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -http_server -http_server.test diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/README.md b/vendor/github.com/Shopify/sarama/examples/http_server/README.md deleted file mode 100644 index 5ff2bc25..00000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# HTTP server example - -This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and send a 200 result if that succeeds. For every request, it will send an access log entry to Kafka as well in the background. - -If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response, while the message is being produced in the background. - -One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. 
This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together. diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go deleted file mode 100644 index b6d83c5d..00000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go +++ /dev/null @@ -1,247 +0,0 @@ -package main - -import ( - "github.com/Shopify/sarama" - - "crypto/tls" - "crypto/x509" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "time" -) - -var ( - addr = flag.String("addr", ":8080", "The address to bind to") - brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") - verbose = flag.Bool("verbose", false, "Turn on Sarama logging") - certFile = flag.String("certificate", "", "The optional certificate file for client authentication") - keyFile = flag.String("key", "", "The optional key file for client authentication") - caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") - verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain") -) - -func main() { - flag.Parse() - - if *verbose { - sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) - } - - if *brokers == "" { - flag.PrintDefaults() - os.Exit(1) - } - - brokerList := strings.Split(*brokers, ",") - log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", ")) - - server := &Server{ - DataCollector: newDataCollector(brokerList), - AccessLogProducer: newAccessLogProducer(brokerList), - } - defer func() { - if err := server.Close(); err != nil { - log.Println("Failed to close server", err) - } - }() - - log.Fatal(server.Run(*addr)) -} - -func createTlsConfiguration() (t *tls.Config) { - if *certFile != "" && *keyFile != "" && *caFile != "" { - cert, err := tls.LoadX509KeyPair(*certFile, 
*keyFile) - if err != nil { - log.Fatal(err) - } - - caCert, err := ioutil.ReadFile(*caFile) - if err != nil { - log.Fatal(err) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - t = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - InsecureSkipVerify: *verifySsl, - } - } - // will be nil by default if nothing is provided - return t -} - -type Server struct { - DataCollector sarama.SyncProducer - AccessLogProducer sarama.AsyncProducer -} - -func (s *Server) Close() error { - if err := s.DataCollector.Close(); err != nil { - log.Println("Failed to shut down data collector cleanly", err) - } - - if err := s.AccessLogProducer.Close(); err != nil { - log.Println("Failed to shut down access log producer cleanly", err) - } - - return nil -} - -func (s *Server) Handler() http.Handler { - return s.withAccessLog(s.collectQueryStringData()) -} - -func (s *Server) Run(addr string) error { - httpServer := &http.Server{ - Addr: addr, - Handler: s.Handler(), - } - - log.Printf("Listening for requests on %s...\n", addr) - return httpServer.ListenAndServe() -} - -func (s *Server) collectQueryStringData() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - - // We are not setting a message key, which means that all messages will - // be distributed randomly over the different partitions. - partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{ - Topic: "important", - Value: sarama.StringEncoder(r.URL.RawQuery), - }) - - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Failed to store your data:, %s", err) - } else { - // The tuple (topic, partition, offset) can be used as a unique identifier - // for a message in a Kafka cluster. 
- fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset) - } - }) -} - -type accessLogEntry struct { - Method string `json:"method"` - Host string `json:"host"` - Path string `json:"path"` - IP string `json:"ip"` - ResponseTime float64 `json:"response_time"` - - encoded []byte - err error -} - -func (ale *accessLogEntry) ensureEncoded() { - if ale.encoded == nil && ale.err == nil { - ale.encoded, ale.err = json.Marshal(ale) - } -} - -func (ale *accessLogEntry) Length() int { - ale.ensureEncoded() - return len(ale.encoded) -} - -func (ale *accessLogEntry) Encode() ([]byte, error) { - ale.ensureEncoded() - return ale.encoded, ale.err -} - -func (s *Server) withAccessLog(next http.Handler) http.Handler { - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - started := time.Now() - - next.ServeHTTP(w, r) - - entry := &accessLogEntry{ - Method: r.Method, - Host: r.Host, - Path: r.RequestURI, - IP: r.RemoteAddr, - ResponseTime: float64(time.Since(started)) / float64(time.Second), - } - - // We will use the client's IP address as key. This will cause - // all the access log entries of the same IP address to end up - // on the same partition. - s.AccessLogProducer.Input() <- &sarama.ProducerMessage{ - Topic: "access_log", - Key: sarama.StringEncoder(r.RemoteAddr), - Value: entry, - } - }) -} - -func newDataCollector(brokerList []string) sarama.SyncProducer { - - // For the data collector, we are looking for strong consistency semantics. - // Because we don't change the flush settings, sarama will try to produce messages - // as fast as possible to keep latency low. 
- config := sarama.NewConfig() - config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message - config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message - config.Producer.Return.Successes = true - tlsConfig := createTlsConfiguration() - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true - } - - // On the broker side, you may want to change the following settings to get - // stronger consistency guarantees: - // - For your broker, set `unclean.leader.election.enable` to false - // - For the topic, you could increase `min.insync.replicas`. - - producer, err := sarama.NewSyncProducer(brokerList, config) - if err != nil { - log.Fatalln("Failed to start Sarama producer:", err) - } - - return producer -} - -func newAccessLogProducer(brokerList []string) sarama.AsyncProducer { - - // For the access log, we are looking for AP semantics, with high throughput. - // By creating batches of compressed messages, we reduce network I/O at a cost of more latency. - config := sarama.NewConfig() - tlsConfig := createTlsConfiguration() - if tlsConfig != nil { - config.Net.TLS.Enable = true - config.Net.TLS.Config = tlsConfig - } - config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack - config.Producer.Compression = sarama.CompressionSnappy // Compress messages - config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms - - producer, err := sarama.NewAsyncProducer(brokerList, config) - if err != nil { - log.Fatalln("Failed to start Sarama producer:", err) - } - - // We will just log to STDOUT if we're not able to produce messages. - // Note: messages will only be returned here after all retry attempts are exhausted. 
- go func() { - for err := range producer.Errors() { - log.Println("Failed to write access log entry:", err) - } - }() - - return producer -} diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go deleted file mode 100644 index 7b2451e2..00000000 --- a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/Shopify/sarama" - "github.com/Shopify/sarama/mocks" -) - -// In normal operation, we expect one access log entry, -// and one data collector entry. Let's assume both will succeed. -// We should return a HTTP 200 status. -func TestCollectSuccessfully(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - dataCollectorMock.ExpectSendMessageAndSucceed() - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - // Now, use dependency injection to use the mocks. - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - - // The Server's Close call is important; it will call Close on - // the two mock producers, which will then validate whether all - // expectations are resolved. - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - s.Handler().ServeHTTP(res, req) - - if res.Code != 200 { - t.Errorf("Expected HTTP status 200, found %d", res.Code) - } - - if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" { - t.Error("Unexpected response body", res.Body) - } -} - -// Now, let's see if we handle the case of not being able to produce -// to the data collector properly. In this case we should return a 500 status. 
-func TestCollectionFailure(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut) - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - s.Handler().ServeHTTP(res, req) - - if res.Code != 500 { - t.Errorf("Expected HTTP status 500, found %d", res.Code) - } -} - -// We don't expect any data collector calls because the path is wrong, -// so we are not setting any expectations on the dataCollectorMock. It -// will still generate an access log entry though. -func TestWrongPath(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - - s.Handler().ServeHTTP(res, req) - - if res.Code != 404 { - t.Errorf("Expected HTTP status 404, found %d", res.Code) - } -} - -func safeClose(t *testing.T, o io.Closer) { - if err := o.Close(); err != nil { - t.Error(err) - } -} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index 462ab8af..f893aeff 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -1,20 +1,41 @@ package sarama type fetchRequestBlock struct { - fetchOffset int64 - maxBytes int32 + Version int16 + currentLeaderEpoch int32 + fetchOffset int64 + 
logStartOffset int64 + maxBytes int32 } -func (b *fetchRequestBlock) encode(pe packetEncoder) error { +func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { + b.Version = version + if b.Version >= 9 { + pe.putInt32(b.currentLeaderEpoch) + } pe.putInt64(b.fetchOffset) + if b.Version >= 5 { + pe.putInt64(b.logStartOffset) + } pe.putInt32(b.maxBytes) return nil } -func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { +func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) { + b.Version = version + if b.Version >= 9 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } if b.fetchOffset, err = pd.getInt64(); err != nil { return err } + if b.Version >= 5 { + if b.logStartOffset, err = pd.getInt64(); err != nil { + return err + } + } if b.maxBytes, err = pd.getInt32(); err != nil { return err } @@ -25,19 +46,23 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. 
The KIP is at // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - MaxBytes int32 - Version int16 - Isolation IsolationLevel - blocks map[string]map[int32]*fetchRequestBlock + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + SessionID int32 + SessionEpoch int32 + blocks map[string]map[int32]*fetchRequestBlock + forgotten map[string][]int32 + RackID string } type IsolationLevel int8 const ( - ReadUncommitted IsolationLevel = 0 - ReadCommitted IsolationLevel = 1 + ReadUncommitted IsolationLevel = iota + ReadCommitted ) func (r *FetchRequest) encode(pe packetEncoder) (err error) { @@ -50,6 +75,10 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { if r.Version >= 4 { pe.putInt8(int8(r.Isolation)) } + if r.Version >= 7 { + pe.putInt32(r.SessionID) + pe.putInt32(r.SessionEpoch) + } err = pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -65,17 +94,44 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { } for partition, block := range blocks { pe.putInt32(partition) - err = block.encode(pe) + err = block.encode(pe, r.Version) if err != nil { return err } } } + if r.Version >= 7 { + err = pe.putArrayLength(len(r.forgotten)) + if err != nil { + return err + } + for topic, partitions := range r.forgotten { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for _, partition := range partitions { + pe.putInt32(partition) + } + } + } + if r.Version >= 11 { + err = pe.putString(r.RackID) + if err != nil { + return err + } + } + return nil } func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version + if _, err = pd.getInt32(); err != nil { return err } @@ -97,6 +153,16 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { } 
r.Isolation = IsolationLevel(isolation) } + if r.Version >= 7 { + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + r.SessionEpoch, err = pd.getInt32() + if err != nil { + return err + } + } topicCount, err := pd.getArrayLength() if err != nil { return err @@ -121,12 +187,47 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { return err } fetchBlock := &fetchRequestBlock{} - if err = fetchBlock.decode(pd); err != nil { + if err = fetchBlock.decode(pd, r.Version); err != nil { return err } r.blocks[topic][partition] = fetchBlock } } + + if r.Version >= 7 { + forgottenCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten = make(map[string][]int32) + for i := 0; i < forgottenCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten[topic] = make([]int32, partitionCount) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.forgotten[topic][j] = partition + } + } + } + + if r.Version >= 11 { + r.RackID, err = pd.getString() + if err != nil { + return err + } + } + return nil } @@ -138,18 +239,34 @@ func (r *FetchRequest) version() int16 { return r.Version } +func (r *FetchRequest) headerVersion() int16 { + return 1 +} + func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { + case 0: + return MinVersion case 1: return V0_9_0_0 case 2: return V0_10_0_0 case 3: return V0_10_1_0 - case 4: + case 4, 5: return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 default: - return MinVersion + return MaxVersion } } @@ -158,13 +275,21 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int r.blocks = make(map[string]map[int32]*fetchRequestBlock) } + if r.Version >= 7 && 
r.forgotten == nil { + r.forgotten = make(map[string][]int32) + } + if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*fetchRequestBlock) } tmp := new(fetchRequestBlock) + tmp.Version = r.Version tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset + if r.Version >= 9 { + tmp.currentLeaderEpoch = int32(-1) + } r.blocks[topic][partitionID] = tmp } diff --git a/vendor/github.com/Shopify/sarama/fetch_request_test.go b/vendor/github.com/Shopify/sarama/fetch_request_test.go deleted file mode 100644 index 1a94c2d1..00000000 --- a/vendor/github.com/Shopify/sarama/fetch_request_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package sarama - -import "testing" - -var ( - fetchRequestNoBlocks = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - fetchRequestWithProperties = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF, - 0x00, 0x00, 0x00, 0x00} - - fetchRequestOneBlock = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56} - - fetchRequestOneBlockV4 = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xFF, - 0x01, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56} -) - -func TestFetchRequest(t *testing.T) { - request := new(FetchRequest) - testRequest(t, "no blocks", request, fetchRequestNoBlocks) - - request.MaxWaitTime = 0x20 - request.MinBytes = 0xEF - testRequest(t, "with properties", request, fetchRequestWithProperties) - - request.MaxWaitTime = 0 - request.MinBytes = 0 - request.AddBlock("topic", 0x12, 0x34, 0x56) - testRequest(t, "one block", request, 
fetchRequestOneBlock) - - request.Version = 4 - request.MaxBytes = 0xFF - request.Isolation = ReadCommitted - testRequest(t, "one block v4", request, fetchRequestOneBlockV4) -} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index dade1c47..54b88284 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,6 +1,7 @@ package sarama import ( + "sort" "time" ) @@ -29,13 +30,15 @@ func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { } type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - LastStableOffset int64 - AbortedTransactions []*AbortedTransaction - Records *Records // deprecated: use FetchResponseBlock.Records - RecordsSet []*Records - Partial bool + Err KError + HighWaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions []*AbortedTransaction + PreferredReadReplica int32 + Records *Records // deprecated: use FetchResponseBlock.RecordsSet + RecordsSet []*Records + Partial bool } func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -56,6 +59,13 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) return err } + if version >= 5 { + b.LogStartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + numTransact, err := pd.getArrayLength() if err != nil { return err @@ -74,6 +84,15 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) } } + if version >= 11 { + b.PreferredReadReplica, err = pd.getInt32() + if err != nil { + return err + } + } else { + b.PreferredReadReplica = -1 + } + recordsSize, err := pd.getInt32() if err != nil { return err @@ -165,6 +184,10 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) if version >= 4 { pe.putInt64(b.LastStableOffset) + if version >= 5 { + pe.putInt64(b.LogStartOffset) + } + if 
err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { return err } @@ -175,6 +198,10 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) } } + if version >= 11 { + pe.putInt32(b.PreferredReadReplica) + } + pe.push(&lengthField{}) for _, records := range b.RecordsSet { err = records.encode(pe) @@ -185,10 +212,25 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) return pe.pop() } +func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { + // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered + // plus Java implementation use a PriorityQueue based on `FirstOffset`. I guess we have to order it ourself + at := b.AbortedTransactions + sort.Slice( + at, + func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset }, + ) + return at +} + type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - Version int16 // v1 requires 0.9+, v2 requires 0.10+ + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + ErrorCode int16 + SessionID int32 + Version int16 + LogAppendTime bool + Timestamp time.Time } func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { @@ -202,6 +244,17 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { r.ThrottleTime = time.Duration(throttle) * time.Millisecond } + if r.Version >= 7 { + r.ErrorCode, err = pd.getInt16() + if err != nil { + return err + } + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + } + numTopics, err := pd.getArrayLength() if err != nil { return err @@ -244,6 +297,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } + if r.Version >= 7 { + pe.putInt16(r.ErrorCode) + pe.putInt32(r.SessionID) + } + err = pe.putArrayLength(len(r.Blocks)) if err != nil { return err @@ -267,7 
+325,6 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { return err } } - } return nil } @@ -280,18 +337,34 @@ func (r *FetchResponse) version() int16 { return r.Version } +func (r *FetchResponse) headerVersion() int16 { + return 0 +} + func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { + case 0: + return MinVersion case 1: return V0_9_0_0 case 2: return V0_10_0_0 case 3: return V0_10_1_0 - case 4: + case 4, 5: return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 default: - return MinVersion + return MaxVersion } } @@ -355,10 +428,13 @@ func encodeKV(key, value Encoder) ([]byte, []byte) { return kb, vb } -func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { +func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) - msg := &Message{Key: kb, Value: vb} + if r.LogAppendTime { + timestamp = r.Timestamp + } + msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version} msgBlock := &MessageBlock{Msg: msg, Offset: offset} if len(frb.RecordsSet) == 0 { records := newLegacyRecords(&MessageSet{}) @@ -368,18 +444,94 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc set.Messages = append(set.Messages, msgBlock) } -func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { +func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) { frb := r.getOrCreateBlock(topic, partition) kb, vb := encodeKV(key, value) - rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} if len(frb.RecordsSet) == 0 { - records := 
newDefaultRecords(&RecordBatch{Version: 2}) + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) frb.RecordsSet = []*Records{&records} } batch := frb.RecordsSet[0].RecordBatch + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} batch.addRecord(rec) } +// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp +// But instead of appending 1 record to a batch, it append a new batch containing 1 record to the fetchResponse +// Since transaction are handled on batch level (the whole batch is either committed or aborted), use this to test transactions +func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: isTransactional, + } + rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + records.RecordBatch = batch + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + + // batch + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: 
producerID, + IsTransactional: true, + Control: true, + } + + // records + records := newDefaultRecords(nil) + records.RecordBatch = batch + + // record + crAbort := ControlRecord{ + Version: 0, + Type: recordType, + } + crKey := &realEncoder{raw: make([]byte, 4)} + crValue := &realEncoder{raw: make([]byte, 6)} + crAbort.encode(crKey, crValue) + rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{}) +} + +func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) { + r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{}) +} + +func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) { + // define controlRecord key and value + r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{}) +} + func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { frb := r.getOrCreateBlock(topic, partition) if len(frb.RecordsSet) == 0 { diff --git a/vendor/github.com/Shopify/sarama/fetch_response_test.go b/vendor/github.com/Shopify/sarama/fetch_response_test.go deleted file mode 100644 index 91702764..00000000 --- a/vendor/github.com/Shopify/sarama/fetch_response_test.go +++ /dev/null @@ -1,331 +0,0 @@ -package sarama - -import ( - "bytes" - "testing" -) - -var ( - 
emptyFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} - - oneMessageFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x05, - 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} - - overflowMessageFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x05, - 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, - 0x00, 0x00, 0x00, 0x30, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE, - // overflow messageSet - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0xFF, - // overflow bytes - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - oneRecordFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x00, // ThrottleTime - 0x00, 0x00, 0x00, 0x01, // Number of Topics - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic - 0x00, 0x00, 0x00, 0x01, // Number of Partitions - 0x00, 0x00, 0x00, 0x05, // Partition - 0x00, 0x01, // Error - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset - 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions - 0x00, 0x00, 0x00, 0x52, // Records length - // recordBatch - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x46, - 0x00, 0x00, 0x00, 0x00, - 0x02, - 0xDB, 0x47, 0x14, 0xC9, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - // record - 0x28, - 0x00, - 0x0A, - 0x00, - 0x08, 0x01, 0x02, 0x03, 0x04, - 0x06, 0x05, 0x06, 0x07, - 0x02, - 0x06, 0x08, 0x09, 0x0A, - 0x04, 0x0B, 0x0C} - - oneMessageFetchResponseV4 = []byte{ - 0x00, 0x00, 0x00, 0x00, // ThrottleTime - 0x00, 0x00, 0x00, 0x01, // Number of Topics - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic - 0x00, 0x00, 0x00, 0x01, // Number of Partitions - 0x00, 0x00, 0x00, 0x05, // Partition - 0x00, 0x01, // Error - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset - 0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} -) - -func TestEmptyFetchResponse(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0) - - if len(response.Blocks) != 0 { - t.Error("Decoding produced topic blocks where there were none.") - } - -} - -func TestOneMessageFetchResponse(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0) - - if len(response.Blocks) != 1 { - t.Fatal("Decoding produced incorrect number of topic blocks.") - } - - if len(response.Blocks["topic"]) != 1 { - t.Fatal("Decoding produced incorrect number of partition blocks for topic.") - } - - block := response.GetBlock("topic", 5) - if block == nil { - t.Fatal("GetBlock didn't return block.") - } - if block.Err != ErrOffsetOutOfRange { - t.Error("Decoding didn't produce correct error code.") - } - if block.HighWaterMarkOffset != 0x10101010 { - t.Error("Decoding didn't produce correct high water mark offset.") - } - partial, err := 
block.isPartial() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if partial { - t.Error("Decoding detected a partial trailing message where there wasn't one.") - } - - n, err := block.numRecords() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if n != 1 { - t.Fatal("Decoding produced incorrect number of messages.") - } - msgBlock := block.RecordsSet[0].MsgSet.Messages[0] - if msgBlock.Offset != 0x550000 { - t.Error("Decoding produced incorrect message offset.") - } - msg := msgBlock.Msg - if msg.Codec != CompressionNone { - t.Error("Decoding produced incorrect message compression.") - } - if msg.Key != nil { - t.Error("Decoding produced message key where there was none.") - } - if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { - t.Error("Decoding produced incorrect message value.") - } -} - -func TestOverflowMessageFetchResponse(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "overflow message", &response, overflowMessageFetchResponse, 0) - - if len(response.Blocks) != 1 { - t.Fatal("Decoding produced incorrect number of topic blocks.") - } - - if len(response.Blocks["topic"]) != 1 { - t.Fatal("Decoding produced incorrect number of partition blocks for topic.") - } - - block := response.GetBlock("topic", 5) - if block == nil { - t.Fatal("GetBlock didn't return block.") - } - if block.Err != ErrOffsetOutOfRange { - t.Error("Decoding didn't produce correct error code.") - } - if block.HighWaterMarkOffset != 0x10101010 { - t.Error("Decoding didn't produce correct high water mark offset.") - } - partial, err := block.Records.isPartial() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if partial { - t.Error("Decoding detected a partial trailing message where there wasn't one.") - } - overflow, err := block.Records.isOverflow() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if !overflow { - t.Error("Decoding detected a partial trailing message where there wasn't one.") - } - - n, 
err := block.Records.numRecords() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if n != 1 { - t.Fatal("Decoding produced incorrect number of messages.") - } - msgBlock := block.Records.MsgSet.Messages[0] - if msgBlock.Offset != 0x550000 { - t.Error("Decoding produced incorrect message offset.") - } - msg := msgBlock.Msg - if msg.Codec != CompressionNone { - t.Error("Decoding produced incorrect message compression.") - } - if msg.Key != nil { - t.Error("Decoding produced message key where there was none.") - } - if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { - t.Error("Decoding produced incorrect message value.") - } -} - -func TestOneRecordFetchResponse(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "one record", &response, oneRecordFetchResponse, 4) - - if len(response.Blocks) != 1 { - t.Fatal("Decoding produced incorrect number of topic blocks.") - } - - if len(response.Blocks["topic"]) != 1 { - t.Fatal("Decoding produced incorrect number of partition blocks for topic.") - } - - block := response.GetBlock("topic", 5) - if block == nil { - t.Fatal("GetBlock didn't return block.") - } - if block.Err != ErrOffsetOutOfRange { - t.Error("Decoding didn't produce correct error code.") - } - if block.HighWaterMarkOffset != 0x10101010 { - t.Error("Decoding didn't produce correct high water mark offset.") - } - partial, err := block.isPartial() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if partial { - t.Error("Decoding detected a partial trailing record where there wasn't one.") - } - - n, err := block.numRecords() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if n != 1 { - t.Fatal("Decoding produced incorrect number of records.") - } - rec := block.RecordsSet[0].RecordBatch.Records[0] - if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) { - t.Error("Decoding produced incorrect record key.") - } - if !bytes.Equal(rec.Value, []byte{0x05, 0x06, 0x07}) { - t.Error("Decoding produced 
incorrect record value.") - } -} - -func TestOneMessageFetchResponseV4(t *testing.T) { - response := FetchResponse{} - testVersionDecodable(t, "one message v4", &response, oneMessageFetchResponseV4, 4) - - if len(response.Blocks) != 1 { - t.Fatal("Decoding produced incorrect number of topic blocks.") - } - - if len(response.Blocks["topic"]) != 1 { - t.Fatal("Decoding produced incorrect number of partition blocks for topic.") - } - - block := response.GetBlock("topic", 5) - if block == nil { - t.Fatal("GetBlock didn't return block.") - } - if block.Err != ErrOffsetOutOfRange { - t.Error("Decoding didn't produce correct error code.") - } - if block.HighWaterMarkOffset != 0x10101010 { - t.Error("Decoding didn't produce correct high water mark offset.") - } - partial, err := block.isPartial() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if partial { - t.Error("Decoding detected a partial trailing record where there wasn't one.") - } - - n, err := block.numRecords() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if n != 1 { - t.Fatal("Decoding produced incorrect number of records.") - } - msgBlock := block.RecordsSet[0].MsgSet.Messages[0] - if msgBlock.Offset != 0x550000 { - t.Error("Decoding produced incorrect message offset.") - } - msg := msgBlock.Msg - if msg.Codec != CompressionNone { - t.Error("Decoding produced incorrect message compression.") - } - if msg.Key != nil { - t.Error("Decoding produced message key where there was none.") - } - if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { - t.Error("Decoding produced incorrect message value.") - } -} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go index 0ab5cb5f..597bcbf7 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -3,8 +3,8 @@ package sarama type CoordinatorType int8 const ( - CoordinatorGroup 
CoordinatorType = 0 - CoordinatorTransaction CoordinatorType = 1 + CoordinatorGroup CoordinatorType = iota + CoordinatorTransaction ) type FindCoordinatorRequest struct { @@ -51,6 +51,10 @@ func (f *FindCoordinatorRequest) version() int16 { return f.Version } +func (r *FindCoordinatorRequest) headerVersion() int16 { + return 1 +} + func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { switch f.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request_test.go b/vendor/github.com/Shopify/sarama/find_coordinator_request_test.go deleted file mode 100644 index 7e889b07..00000000 --- a/vendor/github.com/Shopify/sarama/find_coordinator_request_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package sarama - -import "testing" - -var ( - findCoordinatorRequestConsumerGroup = []byte{ - 0, 5, 'g', 'r', 'o', 'u', 'p', - 0, - } - - findCoordinatorRequestTransaction = []byte{ - 0, 13, 't', 'r', 'a', 'n', 's', 'a', 'c', 't', 'i', 'o', 'n', 'i', 'd', - 1, - } -) - -func TestFindCoordinatorRequest(t *testing.T) { - req := &FindCoordinatorRequest{ - Version: 1, - CoordinatorKey: "group", - CoordinatorType: CoordinatorGroup, - } - - testRequest(t, "version 1 - group", req, findCoordinatorRequestConsumerGroup) - - req = &FindCoordinatorRequest{ - Version: 1, - CoordinatorKey: "transactionid", - CoordinatorType: CoordinatorTransaction, - } - - testRequest(t, "version 1 - transaction", req, findCoordinatorRequestTransaction) -} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go index 9c900e8b..83a648ad 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go +++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -82,6 +82,10 @@ func (f *FindCoordinatorResponse) version() int16 { return f.Version } +func (r *FindCoordinatorResponse) headerVersion() int16 { + return 0 +} + func (f *FindCoordinatorResponse) requiredVersion() 
KafkaVersion { switch f.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response_test.go b/vendor/github.com/Shopify/sarama/find_coordinator_response_test.go deleted file mode 100644 index 417a76c6..00000000 --- a/vendor/github.com/Shopify/sarama/find_coordinator_response_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -func TestFindCoordinatorResponse(t *testing.T) { - errMsg := "kaboom" - - for _, tc := range []struct { - desc string - response *FindCoordinatorResponse - encoded []byte - }{{ - desc: "version 0 - no error", - response: &FindCoordinatorResponse{ - Version: 0, - Err: ErrNoError, - Coordinator: &Broker{ - id: 7, - addr: "host:9092", - }, - }, - encoded: []byte{ - 0, 0, // Err - 0, 0, 0, 7, // Coordinator.ID - 0, 4, 'h', 'o', 's', 't', // Coordinator.Host - 0, 0, 35, 132, // Coordinator.Port - }, - }, { - desc: "version 1 - no error", - response: &FindCoordinatorResponse{ - Version: 1, - ThrottleTime: 100 * time.Millisecond, - Err: ErrNoError, - Coordinator: &Broker{ - id: 7, - addr: "host:9092", - }, - }, - encoded: []byte{ - 0, 0, 0, 100, // ThrottleTime - 0, 0, // Err - 255, 255, // ErrMsg: empty - 0, 0, 0, 7, // Coordinator.ID - 0, 4, 'h', 'o', 's', 't', // Coordinator.Host - 0, 0, 35, 132, // Coordinator.Port - }, - }, { - desc: "version 0 - error", - response: &FindCoordinatorResponse{ - Version: 0, - Err: ErrConsumerCoordinatorNotAvailable, - Coordinator: NoNode, - }, - encoded: []byte{ - 0, 15, // Err - 255, 255, 255, 255, // Coordinator.ID: -1 - 0, 0, // Coordinator.Host: "" - 255, 255, 255, 255, // Coordinator.Port: -1 - }, - }, { - desc: "version 1 - error", - response: &FindCoordinatorResponse{ - Version: 1, - ThrottleTime: 100 * time.Millisecond, - Err: ErrConsumerCoordinatorNotAvailable, - ErrMsg: &errMsg, - Coordinator: NoNode, - }, - encoded: []byte{ - 0, 0, 0, 100, // ThrottleTime - 0, 15, // Err - 0, 6, 'k', 'a', 'b', 'o', 'o', 'm', // ErrMsg - 255, 255, 
255, 255, // Coordinator.ID: -1 - 0, 0, // Coordinator.Host: "" - 255, 255, 255, 255, // Coordinator.Port: -1 - }, - }} { - testResponse(t, tc.desc, tc.response, tc.encoded) - } -} diff --git a/vendor/github.com/Shopify/sarama/functional_client_test.go b/vendor/github.com/Shopify/sarama/functional_client_test.go deleted file mode 100644 index 2bf99d25..00000000 --- a/vendor/github.com/Shopify/sarama/functional_client_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" - "time" -) - -func TestFuncConnectionFailure(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - Proxies["kafka1"].Enabled = false - SaveProxy(t, "kafka1") - - config := NewConfig() - config.Metadata.Retry.Max = 1 - - _, err := NewClient([]string{kafkaBrokers[0]}, config) - if err != ErrOutOfBrokers { - t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err) - } -} - -func TestFuncClientMetadata(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 10 * time.Millisecond - client, err := NewClient(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - partitions, err := client.Partitions("test.4") - if err != nil { - t.Error(err) - } - if len(partitions) != 4 { - t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions) - } - - partitions, err = client.Partitions("test.1") - if err != nil { - t.Error(err) - } - if len(partitions) != 1 { 
- t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions) - } - - safeClose(t, client) -} - -func TestFuncClientCoordinator(t *testing.T) { - checkKafkaVersion(t, "0.8.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - client, err := NewClient(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i)) - if err != nil { - t.Fatal(err) - } - - if connected, err := broker.Connected(); !connected || err != nil { - t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr()) - } - } - - safeClose(t, client) -} diff --git a/vendor/github.com/Shopify/sarama/functional_consumer_group_test.go b/vendor/github.com/Shopify/sarama/functional_consumer_group_test.go deleted file mode 100644 index ae376086..00000000 --- a/vendor/github.com/Shopify/sarama/functional_consumer_group_test.go +++ /dev/null @@ -1,418 +0,0 @@ -// +build go1.9 - -package sarama - -import ( - "context" - "fmt" - "log" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" -) - -func TestFuncConsumerGroupPartitioning(t *testing.T) { - checkKafkaVersion(t, "0.10.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - groupID := testFuncConsumerGroupID(t) - - // start M1 - m1 := runTestFuncConsumerGroupMember(t, groupID, "M1", 0, nil) - defer m1.Stop() - m1.WaitForState(2) - m1.WaitForClaims(map[string]int{"test.4": 4}) - m1.WaitForHandlers(4) - - // start M2 - m2 := runTestFuncConsumerGroupMember(t, groupID, "M2", 0, nil, "test.1", "test.4") - defer m2.Stop() - m2.WaitForState(2) - - // assert that claims are shared among both members - m1.WaitForClaims(map[string]int{"test.4": 2}) - m1.WaitForHandlers(2) - m2.WaitForClaims(map[string]int{"test.1": 1, "test.4": 2}) - m2.WaitForHandlers(3) - - // shutdown M1, wait for M2 to take over - m1.AssertCleanShutdown() - m2.WaitForClaims(map[string]int{"test.1": 1, "test.4": 4}) - 
m2.WaitForHandlers(5) - - // shutdown M2 - m2.AssertCleanShutdown() -} - -func TestFuncConsumerGroupExcessConsumers(t *testing.T) { - checkKafkaVersion(t, "0.10.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - groupID := testFuncConsumerGroupID(t) - - // start members - m1 := runTestFuncConsumerGroupMember(t, groupID, "M1", 0, nil) - defer m1.Stop() - m2 := runTestFuncConsumerGroupMember(t, groupID, "M2", 0, nil) - defer m2.Stop() - m3 := runTestFuncConsumerGroupMember(t, groupID, "M3", 0, nil) - defer m3.Stop() - m4 := runTestFuncConsumerGroupMember(t, groupID, "M4", 0, nil) - defer m4.Stop() - - m1.WaitForClaims(map[string]int{"test.4": 1}) - m2.WaitForClaims(map[string]int{"test.4": 1}) - m3.WaitForClaims(map[string]int{"test.4": 1}) - m4.WaitForClaims(map[string]int{"test.4": 1}) - - // start M5 - m5 := runTestFuncConsumerGroupMember(t, groupID, "M5", 0, nil) - defer m5.Stop() - m5.WaitForState(1) - m5.AssertNoErrs() - - // assert that claims are shared among both members - m4.AssertCleanShutdown() - m5.WaitForState(2) - m5.WaitForClaims(map[string]int{"test.4": 1}) - - // shutdown everything - m1.AssertCleanShutdown() - m2.AssertCleanShutdown() - m3.AssertCleanShutdown() - m5.AssertCleanShutdown() -} - -func TestFuncConsumerGroupFuzzy(t *testing.T) { - checkKafkaVersion(t, "0.10.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - if err := testFuncConsumerGroupFuzzySeed("test.4"); err != nil { - t.Fatal(err) - } - - groupID := testFuncConsumerGroupID(t) - sink := &testFuncConsumerGroupSink{msgs: make(chan testFuncConsumerGroupMessage, 20000)} - waitForMessages := func(t *testing.T, n int) { - t.Helper() - - for i := 0; i < 600; i++ { - if sink.Len() >= n { - break - } - time.Sleep(100 * time.Millisecond) - } - if sz := sink.Len(); sz < n { - log.Fatalf("expected to consume %d messages, but consumed %d", n, sz) - } - } - - defer runTestFuncConsumerGroupMember(t, groupID, "M1", 1500, sink).Stop() - defer 
runTestFuncConsumerGroupMember(t, groupID, "M2", 3000, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M3", 1500, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M4", 200, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M5", 100, sink).Stop() - waitForMessages(t, 3000) - - defer runTestFuncConsumerGroupMember(t, groupID, "M6", 300, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M7", 400, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M8", 500, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M9", 2000, sink).Stop() - waitForMessages(t, 8000) - - defer runTestFuncConsumerGroupMember(t, groupID, "M10", 1000, sink).Stop() - waitForMessages(t, 10000) - - defer runTestFuncConsumerGroupMember(t, groupID, "M11", 1000, sink).Stop() - defer runTestFuncConsumerGroupMember(t, groupID, "M12", 2500, sink).Stop() - waitForMessages(t, 12000) - - defer runTestFuncConsumerGroupMember(t, groupID, "M13", 1000, sink).Stop() - waitForMessages(t, 15000) - - if umap := sink.Close(); len(umap) != 15000 { - dupes := make(map[string][]string) - for k, v := range umap { - if len(v) > 1 { - dupes[k] = v - } - } - t.Fatalf("expected %d unique messages to be consumed but got %d, including %d duplicates:\n%v", 15000, len(umap), len(dupes), dupes) - } -} - -// -------------------------------------------------------------------- - -func testFuncConsumerGroupID(t *testing.T) string { - return fmt.Sprintf("sarama.%s%d", t.Name(), time.Now().UnixNano()) -} - -func testFuncConsumerGroupFuzzySeed(topic string) error { - client, err := NewClient(kafkaBrokers, nil) - if err != nil { - return err - } - defer func() { _ = client.Close() }() - - total := int64(0) - for pn := int32(0); pn < 4; pn++ { - newest, err := client.GetOffset(topic, pn, OffsetNewest) - if err != nil { - return err - } - oldest, err := client.GetOffset(topic, pn, OffsetOldest) - if err != nil { - return err - } - total = total + 
newest - oldest - } - if total >= 21000 { - return nil - } - - producer, err := NewAsyncProducerFromClient(client) - if err != nil { - return err - } - for i := total; i < 21000; i++ { - producer.Input() <- &ProducerMessage{Topic: topic, Value: ByteEncoder([]byte("testdata"))} - } - return producer.Close() -} - -type testFuncConsumerGroupMessage struct { - ClientID string - *ConsumerMessage -} - -type testFuncConsumerGroupSink struct { - msgs chan testFuncConsumerGroupMessage - count int32 -} - -func (s *testFuncConsumerGroupSink) Len() int { - if s == nil { - return -1 - } - return int(atomic.LoadInt32(&s.count)) -} - -func (s *testFuncConsumerGroupSink) Push(clientID string, m *ConsumerMessage) { - if s != nil { - s.msgs <- testFuncConsumerGroupMessage{ClientID: clientID, ConsumerMessage: m} - atomic.AddInt32(&s.count, 1) - } -} - -func (s *testFuncConsumerGroupSink) Close() map[string][]string { - close(s.msgs) - - res := make(map[string][]string) - for msg := range s.msgs { - key := fmt.Sprintf("%s-%d:%d", msg.Topic, msg.Partition, msg.Offset) - res[key] = append(res[key], msg.ClientID) - } - return res -} - -type testFuncConsumerGroupMember struct { - ConsumerGroup - clientID string - claims map[string]int - state int32 - handlers int32 - errs []error - maxMessages int32 - isCapped bool - sink *testFuncConsumerGroupSink - - t *testing.T - mu sync.RWMutex -} - -func runTestFuncConsumerGroupMember(t *testing.T, groupID, clientID string, maxMessages int32, sink *testFuncConsumerGroupSink, topics ...string) *testFuncConsumerGroupMember { - t.Helper() - - config := NewConfig() - config.ClientID = clientID - config.Version = V0_10_2_0 - config.Consumer.Return.Errors = true - config.Consumer.Offsets.Initial = OffsetOldest - config.Consumer.Group.Rebalance.Timeout = 10 * time.Second - - group, err := NewConsumerGroup(kafkaBrokers, groupID, config) - if err != nil { - t.Fatal(err) - return nil - } - - if len(topics) == 0 { - topics = []string{"test.4"} - } - - member 
:= &testFuncConsumerGroupMember{ - ConsumerGroup: group, - clientID: clientID, - claims: make(map[string]int), - maxMessages: maxMessages, - isCapped: maxMessages != 0, - sink: sink, - t: t, - } - go member.loop(topics) - return member -} - -func (m *testFuncConsumerGroupMember) AssertCleanShutdown() { - m.t.Helper() - - if err := m.Close(); err != nil { - m.t.Fatalf("unexpected error on Close(): %v", err) - } - m.WaitForState(4) - m.WaitForHandlers(0) - m.AssertNoErrs() -} - -func (m *testFuncConsumerGroupMember) AssertNoErrs() { - m.t.Helper() - - var errs []error - m.mu.RLock() - errs = append(errs, m.errs...) - m.mu.RUnlock() - - if len(errs) != 0 { - m.t.Fatalf("unexpected consumer errors: %v", errs) - } -} - -func (m *testFuncConsumerGroupMember) WaitForState(expected int32) { - m.t.Helper() - - m.waitFor("state", expected, func() (interface{}, error) { - return atomic.LoadInt32(&m.state), nil - }) -} - -func (m *testFuncConsumerGroupMember) WaitForHandlers(expected int) { - m.t.Helper() - - m.waitFor("handlers", expected, func() (interface{}, error) { - return int(atomic.LoadInt32(&m.handlers)), nil - }) -} - -func (m *testFuncConsumerGroupMember) WaitForClaims(expected map[string]int) { - m.t.Helper() - - m.waitFor("claims", expected, func() (interface{}, error) { - m.mu.RLock() - claims := m.claims - m.mu.RUnlock() - return claims, nil - }) -} - -func (m *testFuncConsumerGroupMember) Stop() { _ = m.Close() } - -func (m *testFuncConsumerGroupMember) Setup(s ConsumerGroupSession) error { - // store claims - claims := make(map[string]int) - for topic, partitions := range s.Claims() { - claims[topic] = len(partitions) - } - m.mu.Lock() - m.claims = claims - m.mu.Unlock() - - // enter post-setup state - atomic.StoreInt32(&m.state, 2) - return nil -} -func (m *testFuncConsumerGroupMember) Cleanup(s ConsumerGroupSession) error { - // enter post-cleanup state - atomic.StoreInt32(&m.state, 3) - return nil -} -func (m *testFuncConsumerGroupMember) ConsumeClaim(s 
ConsumerGroupSession, c ConsumerGroupClaim) error { - atomic.AddInt32(&m.handlers, 1) - defer atomic.AddInt32(&m.handlers, -1) - - for msg := range c.Messages() { - if n := atomic.AddInt32(&m.maxMessages, -1); m.isCapped && n < 0 { - break - } - s.MarkMessage(msg, "") - m.sink.Push(m.clientID, msg) - } - return nil -} - -func (m *testFuncConsumerGroupMember) waitFor(kind string, expected interface{}, factory func() (interface{}, error)) { - m.t.Helper() - - deadline := time.NewTimer(60 * time.Second) - defer deadline.Stop() - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - var actual interface{} - for { - var err error - if actual, err = factory(); err != nil { - m.t.Errorf("failed retrieve value, expected %s %#v but received error %v", kind, expected, err) - } - - if reflect.DeepEqual(expected, actual) { - return - } - - select { - case <-deadline.C: - m.t.Fatalf("ttl exceeded, expected %s %#v but got %#v", kind, expected, actual) - return - case <-ticker.C: - } - } -} - -func (m *testFuncConsumerGroupMember) loop(topics []string) { - defer atomic.StoreInt32(&m.state, 4) - - go func() { - for err := range m.Errors() { - _ = m.Close() - - m.mu.Lock() - m.errs = append(m.errs, err) - m.mu.Unlock() - } - }() - - ctx := context.Background() - for { - // set state to pre-consume - atomic.StoreInt32(&m.state, 1) - - if err := m.Consume(ctx, topics, m); err == ErrClosedConsumerGroup { - return - } else if err != nil { - m.mu.Lock() - m.errs = append(m.errs, err) - m.mu.Unlock() - return - } - - // return if capped - if n := atomic.LoadInt32(&m.maxMessages); m.isCapped && n < 0 { - return - } - } -} diff --git a/vendor/github.com/Shopify/sarama/functional_consumer_test.go b/vendor/github.com/Shopify/sarama/functional_consumer_test.go deleted file mode 100644 index 83bec033..00000000 --- a/vendor/github.com/Shopify/sarama/functional_consumer_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package sarama - -import ( - "fmt" - "math" - "os" - "sort" - 
"sync" - "testing" - "time" -) - -func TestFuncConsumerOffsetOutOfRange(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - consumer, err := NewConsumer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange { - t.Error("Expected ErrOffsetOutOfRange, got:", err) - } - - if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange { - t.Error("Expected ErrOffsetOutOfRange, got:", err) - } - - safeClose(t, consumer) -} - -func TestConsumerHighWaterMarkOffset(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - p, err := NewSyncProducer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, p) - - _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")}) - if err != nil { - t.Fatal(err) - } - - c, err := NewConsumer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, c) - - pc, err := c.ConsumePartition("test.1", 0, offset) - if err != nil { - t.Fatal(err) - } - - <-pc.Messages() - - if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 { - t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo) - } - - safeClose(t, pc) -} - -// Makes sure that messages produced by all supported client versions/ -// compression codecs (except LZ4) combinations can be consumed by all -// supported consumer versions. It relies on the KAFKA_VERSION environment -// variable to provide the version of the test Kafka cluster. -// -// Note that LZ4 codec was introduced in v0.10.0.0 and therefore is excluded -// from this test case. It has a similar version matrix test case below that -// only checks versions from v0.10.0.0 until KAFKA_VERSION. 
-func TestVersionMatrix(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - // Produce lot's of message with all possible combinations of supported - // protocol versions and compressions for the except of LZ4. - testVersions := versionRange(V0_8_2_0) - allCodecsButLZ4 := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy} - producedMessages := produceMsgs(t, testVersions, allCodecsButLZ4, 17, 100) - - // When/Then - consumeMsgs(t, testVersions, producedMessages) -} - -// Support for LZ4 codec was introduced in v0.10.0.0 so a version matrix to -// test LZ4 should start with v0.10.0.0. -func TestVersionMatrixLZ4(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - // Produce lot's of message with all possible combinations of supported - // protocol versions starting with v0.10 (first where LZ4 was supported) - // and all possible compressions. - testVersions := versionRange(V0_10_0_0) - allCodecs := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy, CompressionLZ4} - producedMessages := produceMsgs(t, testVersions, allCodecs, 17, 100) - - // When/Then - consumeMsgs(t, testVersions, producedMessages) -} - -func prodMsg2Str(prodMsg *ProducerMessage) string { - return fmt.Sprintf("{offset: %d, value: %s}", prodMsg.Offset, string(prodMsg.Value.(StringEncoder))) -} - -func consMsg2Str(consMsg *ConsumerMessage) string { - return fmt.Sprintf("{offset: %d, value: %s}", consMsg.Offset, string(consMsg.Value)) -} - -func versionRange(lower KafkaVersion) []KafkaVersion { - // Get the test cluster version from the environment. If there is nothing - // there then assume the highest. 
- upper, err := ParseKafkaVersion(os.Getenv("KAFKA_VERSION")) - if err != nil { - upper = MaxVersion - } - - versions := make([]KafkaVersion, 0, len(SupportedVersions)) - for _, v := range SupportedVersions { - if !v.IsAtLeast(lower) { - continue - } - if !upper.IsAtLeast(v) { - return versions - } - versions = append(versions, v) - } - return versions -} - -func produceMsgs(t *testing.T, clientVersions []KafkaVersion, codecs []CompressionCodec, flush int, countPerVerCodec int) []*ProducerMessage { - var wg sync.WaitGroup - var producedMessagesMu sync.Mutex - var producedMessages []*ProducerMessage - for _, prodVer := range clientVersions { - for _, codec := range codecs { - prodCfg := NewConfig() - prodCfg.Version = prodVer - prodCfg.Producer.Return.Successes = true - prodCfg.Producer.Return.Errors = true - prodCfg.Producer.Flush.MaxMessages = flush - prodCfg.Producer.Compression = codec - - p, err := NewSyncProducer(kafkaBrokers, prodCfg) - if err != nil { - t.Errorf("Failed to create producer: version=%s, compression=%s, err=%v", prodVer, codec, err) - continue - } - defer safeClose(t, p) - for i := 0; i < countPerVerCodec; i++ { - msg := &ProducerMessage{ - Topic: "test.1", - Value: StringEncoder(fmt.Sprintf("msg:%s:%s:%d", prodVer, codec, i)), - } - wg.Add(1) - go func() { - defer wg.Done() - _, _, err := p.SendMessage(msg) - if err != nil { - t.Errorf("Failed to produce message: %s, err=%v", msg.Value, err) - } - producedMessagesMu.Lock() - producedMessages = append(producedMessages, msg) - producedMessagesMu.Unlock() - }() - } - } - } - wg.Wait() - - // Sort produced message in ascending offset order. 
- sort.Slice(producedMessages, func(i, j int) bool { - return producedMessages[i].Offset < producedMessages[j].Offset - }) - t.Logf("*** Total produced %d, firstOffset=%d, lastOffset=%d\n", - len(producedMessages), producedMessages[0].Offset, producedMessages[len(producedMessages)-1].Offset) - return producedMessages -} - -func consumeMsgs(t *testing.T, clientVersions []KafkaVersion, producedMessages []*ProducerMessage) { - // Consume all produced messages with all client versions supported by the - // cluster. -consumerVersionLoop: - for _, consVer := range clientVersions { - t.Logf("*** Consuming with client version %s\n", consVer) - // Create a partition consumer that should start from the first produced - // message. - consCfg := NewConfig() - consCfg.Version = consVer - c, err := NewConsumer(kafkaBrokers, consCfg) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, c) - pc, err := c.ConsumePartition("test.1", 0, producedMessages[0].Offset) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, pc) - - // Consume as many messages as there have been produced and make sure that - // order is preserved. 
- for i, prodMsg := range producedMessages { - select { - case consMsg := <-pc.Messages(): - if consMsg.Offset != prodMsg.Offset { - t.Errorf("Consumed unexpected offset: version=%s, index=%d, want=%s, got=%s", - consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg)) - continue consumerVersionLoop - } - if string(consMsg.Value) != string(prodMsg.Value.(StringEncoder)) { - t.Errorf("Consumed unexpected msg: version=%s, index=%d, want=%s, got=%s", - consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg)) - continue consumerVersionLoop - } - case <-time.After(3 * time.Second): - t.Fatalf("Timeout waiting for: index=%d, offset=%d, msg=%s", i, prodMsg.Offset, prodMsg.Value) - } - } - } -} diff --git a/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go b/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go deleted file mode 100644 index 436f35ef..00000000 --- a/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package sarama - -import ( - "testing" -) - -func TestFuncOffsetManager(t *testing.T) { - checkKafkaVersion(t, "0.8.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - client, err := NewClient(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client) - if err != nil { - t.Fatal(err) - } - - pom1, err := offsetManager.ManagePartition("test.1", 0) - if err != nil { - t.Fatal(err) - } - - pom1.MarkOffset(10, "test metadata") - safeClose(t, pom1) - - pom2, err := offsetManager.ManagePartition("test.1", 0) - if err != nil { - t.Fatal(err) - } - - offset, metadata := pom2.NextOffset() - - if offset != 10 { - t.Errorf("Expected the next offset to be 10, found %d.", offset) - } - if metadata != "test metadata" { - t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata) - } - - safeClose(t, pom2) - safeClose(t, offsetManager) - safeClose(t, client) -} diff --git 
a/vendor/github.com/Shopify/sarama/functional_producer_test.go b/vendor/github.com/Shopify/sarama/functional_producer_test.go deleted file mode 100644 index 91bf3b5e..00000000 --- a/vendor/github.com/Shopify/sarama/functional_producer_test.go +++ /dev/null @@ -1,323 +0,0 @@ -package sarama - -import ( - "fmt" - "os" - "sync" - "testing" - "time" - - toxiproxy "github.com/Shopify/toxiproxy/client" - "github.com/rcrowley/go-metrics" -) - -const TestBatchSize = 1000 - -func TestFuncProducing(t *testing.T) { - config := NewConfig() - testProducingMessages(t, config) -} - -func TestFuncProducingGzip(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionGZIP - testProducingMessages(t, config) -} - -func TestFuncProducingSnappy(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionSnappy - testProducingMessages(t, config) -} - -func TestFuncProducingNoResponse(t *testing.T) { - config := NewConfig() - config.Producer.RequiredAcks = NoResponse - testProducingMessages(t, config) -} - -func TestFuncProducingFlushing(t *testing.T) { - config := NewConfig() - config.Producer.Flush.Messages = TestBatchSize / 8 - config.Producer.Flush.Frequency = 250 * time.Millisecond - testProducingMessages(t, config) -} - -func TestFuncMultiPartitionProduce(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config := NewConfig() - config.ChannelBufferSize = 20 - config.Producer.Flush.Frequency = 50 * time.Millisecond - config.Producer.Flush.Messages = 200 - config.Producer.Return.Successes = true - producer, err := NewSyncProducer(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - var wg sync.WaitGroup - wg.Add(TestBatchSize) - - for i := 1; i <= TestBatchSize; i++ { - go func(i int) { - defer wg.Done() - msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))} - if _, _, err := producer.SendMessage(msg); err != nil { - t.Error(i, err) - } - }(i) - } - - 
wg.Wait() - if err := producer.Close(); err != nil { - t.Error(err) - } -} - -func TestFuncProducingToInvalidTopic(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - producer, err := NewSyncProducer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - - if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - - safeClose(t, producer) -} - -func testProducingMessages(t *testing.T, config *Config) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - // Configure some latency in order to properly validate the request latency metric - for _, proxy := range Proxies { - if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil { - t.Fatal("Unable to configure latency toxicity", err) - } - } - - config.Producer.Return.Successes = true - config.Consumer.Return.Errors = true - - client, err := NewClient(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - // Keep in mind the current offset - initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest) - if err != nil { - t.Fatal(err) - } - - producer, err := NewAsyncProducerFromClient(client) - if err != nil { - t.Fatal(err) - } - - expectedResponses := TestBatchSize - for i := 1; i <= TestBatchSize; { - msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))} - select { - case producer.Input() <- msg: - i++ - case ret := <-producer.Errors(): - t.Fatal(ret.Err) - case <-producer.Successes(): - expectedResponses-- - } - } - for expectedResponses > 0 { - select { - case ret := <-producer.Errors(): - t.Fatal(ret.Err) - case <-producer.Successes(): - expectedResponses-- - } - } - 
safeClose(t, producer) - - // Validate producer metrics before using the consumer minus the offset request - validateMetrics(t, client) - - master, err := NewConsumerFromClient(client) - if err != nil { - t.Fatal(err) - } - consumer, err := master.ConsumePartition("test.1", 0, initialOffset) - if err != nil { - t.Fatal(err) - } - - for i := 1; i <= TestBatchSize; i++ { - select { - case <-time.After(10 * time.Second): - t.Fatal("Not received any more events in the last 10 seconds.") - - case err := <-consumer.Errors(): - t.Error(err) - - case message := <-consumer.Messages(): - if string(message.Value) != fmt.Sprintf("testing %d", i) { - t.Fatalf("Unexpected message with index %d: %s", i, message.Value) - } - } - - } - safeClose(t, consumer) - safeClose(t, client) -} - -func validateMetrics(t *testing.T, client Client) { - // Get the broker used by test1 topic - var broker *Broker - if partitions, err := client.Partitions("test.1"); err != nil { - t.Error(err) - } else { - for _, partition := range partitions { - if b, err := client.Leader("test.1", partition); err != nil { - t.Error(err) - } else { - if broker != nil && b != broker { - t.Fatal("Expected only one broker, got at least 2") - } - broker = b - } - } - } - - metricValidators := newMetricValidators() - noResponse := client.Config().Producer.RequiredAcks == NoResponse - compressionEnabled := client.Config().Producer.Compression != CompressionNone - - // We are adding 10ms of latency to all requests with toxiproxy - minRequestLatencyInMs := 10 - if noResponse { - // but when we do not wait for a response it can be less than 1ms - minRequestLatencyInMs = 0 - } - - // We read at least 1 byte from the broker - metricValidators.registerForAllBrokers(broker, minCountMeterValidator("incoming-byte-rate", 1)) - // in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request) - metricValidators.register(minCountMeterValidator("request-rate", 3)) - 
metricValidators.register(minCountHistogramValidator("request-size", 3)) - metricValidators.register(minValHistogramValidator("request-size", 1)) - metricValidators.register(minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs)) - // and at least 2 requests to the registered broker (offset + produces) - metricValidators.registerForBroker(broker, minCountMeterValidator("request-rate", 2)) - metricValidators.registerForBroker(broker, minCountHistogramValidator("request-size", 2)) - metricValidators.registerForBroker(broker, minValHistogramValidator("request-size", 1)) - metricValidators.registerForBroker(broker, minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs)) - - // We send at least 1 batch - metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("batch-size", 1)) - metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("batch-size", 1)) - if compressionEnabled { - // We record compression ratios between [0.50,-10.00] (50-1000 with a histogram) for at least one "fake" record - metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 1)) - metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 50)) - metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 1000)) - } else { - // We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record - metricValidators.registerForGlobalAndTopic("test_1", countHistogramValidator("compression-ratio", TestBatchSize)) - metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 100)) - metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 100)) - } - - // We send exactly TestBatchSize messages - metricValidators.registerForGlobalAndTopic("test_1", countMeterValidator("record-send-rate", TestBatchSize)) 
- // We send at least one record per request - metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("records-per-request", 1)) - metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("records-per-request", 1)) - - // We receive at least 1 byte from the broker - metricValidators.registerForAllBrokers(broker, minCountMeterValidator("outgoing-byte-rate", 1)) - if noResponse { - // in exactly 2 global responses (metadata + offset) - metricValidators.register(countMeterValidator("response-rate", 2)) - metricValidators.register(minCountHistogramValidator("response-size", 2)) - metricValidators.register(minValHistogramValidator("response-size", 1)) - // and exactly 1 offset response for the registered broker - metricValidators.registerForBroker(broker, countMeterValidator("response-rate", 1)) - metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 1)) - metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1)) - } else { - // in at least 3 global responses (metadata + offset + produces) - metricValidators.register(minCountMeterValidator("response-rate", 3)) - metricValidators.register(minCountHistogramValidator("response-size", 3)) - metricValidators.register(minValHistogramValidator("response-size", 1)) - // and at least 2 for the registered broker - metricValidators.registerForBroker(broker, minCountMeterValidator("response-rate", 2)) - metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 2)) - metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1)) - } - - // Run the validators - metricValidators.run(t, client.Config().MetricRegistry) -} - -// Benchmarks - -func BenchmarkProducerSmall(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128))) -} -func BenchmarkProducerMedium(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024))) 
-} -func BenchmarkProducerLarge(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192))) -} -func BenchmarkProducerSmallSinglePartition(b *testing.B) { - benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128))) -} -func BenchmarkProducerMediumSnappy(b *testing.B) { - conf := NewConfig() - conf.Producer.Compression = CompressionSnappy - benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024))) -} - -func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) { - setupFunctionalTest(b) - defer teardownFunctionalTest(b) - - metricsDisable := os.Getenv("METRICS_DISABLE") - if metricsDisable != "" { - previousUseNilMetrics := metrics.UseNilMetrics - Logger.Println("Disabling metrics using no-op implementation") - metrics.UseNilMetrics = true - // Restore previous setting - defer func() { - metrics.UseNilMetrics = previousUseNilMetrics - }() - } - - producer, err := NewAsyncProducer(kafkaBrokers, conf) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - - for i := 1; i <= b.N; { - msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value} - select { - case producer.Input() <- msg: - i++ - case ret := <-producer.Errors(): - b.Fatal(ret.Err) - } - } - safeClose(b, producer) -} diff --git a/vendor/github.com/Shopify/sarama/functional_test.go b/vendor/github.com/Shopify/sarama/functional_test.go deleted file mode 100644 index 846eb29f..00000000 --- a/vendor/github.com/Shopify/sarama/functional_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package sarama - -import ( - "log" - "math/rand" - "net" - "os" - "strconv" - "strings" - "testing" - "time" - - toxiproxy "github.com/Shopify/toxiproxy/client" -) - -const ( - VagrantToxiproxy = "http://192.168.100.67:8474" - VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095" - VagrantZookeeperPeers = 
"192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185" -) - -var ( - kafkaAvailable, kafkaRequired bool - kafkaBrokers []string - - proxyClient *toxiproxy.Client - Proxies map[string]*toxiproxy.Proxy - ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"} - KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"} -) - -func init() { - if os.Getenv("DEBUG") == "true" { - Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) - } - - seed := time.Now().UTC().UnixNano() - if tmp := os.Getenv("TEST_SEED"); tmp != "" { - seed, _ = strconv.ParseInt(tmp, 0, 64) - } - Logger.Println("Using random seed:", seed) - rand.Seed(seed) - - proxyAddr := os.Getenv("TOXIPROXY_ADDR") - if proxyAddr == "" { - proxyAddr = VagrantToxiproxy - } - proxyClient = toxiproxy.NewClient(proxyAddr) - - kafkaPeers := os.Getenv("KAFKA_PEERS") - if kafkaPeers == "" { - kafkaPeers = VagrantKafkaPeers - } - kafkaBrokers = strings.Split(kafkaPeers, ",") - - if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil { - if err = c.Close(); err == nil { - kafkaAvailable = true - } - } - - kafkaRequired = os.Getenv("CI") != "" -} - -func checkKafkaAvailability(t testing.TB) { - if !kafkaAvailable { - if kafkaRequired { - t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) - } else { - t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) - } - } -} - -func checkKafkaVersion(t testing.TB, requiredVersion string) { - kafkaVersion := os.Getenv("KAFKA_VERSION") - if kafkaVersion == "" { - t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. 
Continuing...", requiredVersion) - } else { - available := parseKafkaVersion(kafkaVersion) - required := parseKafkaVersion(requiredVersion) - if !available.satisfies(required) { - t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion) - } - } -} - -func resetProxies(t testing.TB) { - if err := proxyClient.ResetState(); err != nil { - t.Error(err) - } - Proxies = nil -} - -func fetchProxies(t testing.TB) { - var err error - Proxies, err = proxyClient.Proxies() - if err != nil { - t.Fatal(err) - } -} - -func SaveProxy(t *testing.T, px string) { - if err := Proxies[px].Save(); err != nil { - t.Fatal(err) - } -} - -func setupFunctionalTest(t testing.TB) { - checkKafkaAvailability(t) - resetProxies(t) - fetchProxies(t) -} - -func teardownFunctionalTest(t testing.TB) { - resetProxies(t) -} - -type kafkaVersion []int - -func (kv kafkaVersion) satisfies(other kafkaVersion) bool { - var ov int - for index, v := range kv { - if len(other) <= index { - ov = 0 - } else { - ov = other[index] - } - - if v < ov { - return false - } else if v > ov { - return true - } - } - return true -} - -func parseKafkaVersion(version string) kafkaVersion { - numbers := strings.Split(version, ".") - result := make(kafkaVersion, 0, len(numbers)) - for _, number := range numbers { - nr, _ := strconv.Atoi(number) - result = append(result, nr) - } - - return result -} diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod new file mode 100644 index 00000000..f4cc90c0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.mod @@ -0,0 +1,28 @@ +module github.com/Shopify/sarama + +go 1.13 + +require ( + github.com/Shopify/toxiproxy v2.1.4+incompatible + github.com/davecgh/go-spew v1.1.1 + github.com/eapache/go-resiliency v1.2.0 + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 + github.com/eapache/queue v1.1.0 + github.com/fortytw2/leaktest v1.3.0 + github.com/frankban/quicktest 
v1.11.3 // indirect + github.com/golang/snappy v0.0.2 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 + github.com/klauspost/compress v1.11.7 + github.com/kr/text v0.2.0 // indirect + github.com/pierrec/lz4 v2.6.0+incompatible + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 + github.com/stretchr/testify v1.7.0 + github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + github.com/xdg/stringprep v1.0.0 // indirect + golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect + golang.org/x/net v0.0.0-20210119194325-5f4716e94777 + golang.org/x/text v0.3.5 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum new file mode 100644 index 00000000..94afb2ae --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.sum @@ -0,0 +1,98 @@ +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod 
h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= 
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go new file mode 100644 index 00000000..44fd4462 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go @@ -0,0 +1,258 @@ +package sarama + +import ( + "encoding/asn1" + "encoding/binary" + "fmt" + "io" + "strings" + "time" + + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + TOK_ID_KRB_AP_REQ = 256 + GSS_API_GENERIC_TAG = 0x60 + KRB5_USER_AUTH = 1 + KRB5_KEYTAB_AUTH = 2 + GSS_API_INITIAL = 1 + GSS_API_VERIFY = 2 + GSS_API_FINISH = 3 +) + +type GSSAPIConfig struct { + AuthType int + KeyTabPath string + KerberosConfigPath string + ServiceName string + Username string + Password string + Realm string + DisablePAFXFAST bool +} + +type GSSAPIKerberosAuth struct { + Config *GSSAPIConfig + ticket messages.Ticket + encKey types.EncryptionKey + NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error) + step int +} + +type KerberosClient interface { + Login() error + GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) + Domain() string + CName() types.PrincipalName + Destroy() +} + +/* +* +* Appends length in big endian before payload, and send it to kafka +* + */ + +func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { + 
length := len(payload) + finalPackage := make([]byte, length+4) //4 byte length header + payload + copy(finalPackage[4:], payload) + binary.BigEndian.PutUint32(finalPackage, uint32(length)) + bytes, err := broker.conn.Write(finalPackage) + if err != nil { + return bytes, err + } + return bytes, nil +} + +/* +* +* Read length (4 bytes) and then read the payload +* + */ + +func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) { + bytesRead := 0 + lengthInBytes := make([]byte, 4) + bytes, err := io.ReadFull(broker.conn, lengthInBytes) + if err != nil { + return nil, bytesRead, err + } + bytesRead += bytes + payloadLength := binary.BigEndian.Uint32(lengthInBytes) + payloadBytes := make([]byte, payloadLength) // buffer for read.. + bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes + if err != nil { + return payloadBytes, bytesRead, err + } + bytesRead += bytes + return payloadBytes, bytesRead, nil +} + +func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte { + a := make([]byte, 24) + flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf} + binary.LittleEndian.PutUint32(a[:4], 16) + for _, i := range flags { + f := binary.LittleEndian.Uint32(a[20:24]) + f |= uint32(i) + binary.LittleEndian.PutUint32(a[20:24], f) + } + return a +} + +/* +* +* Construct Kerberos AP_REQ package, conforming to RFC-4120 +* https://tools.ietf.org/html/rfc4120#page-84 +* + */ +func (krbAuth *GSSAPIKerberosAuth) createKrb5Token( + domain string, cname types.PrincipalName, + ticket messages.Ticket, + sessionKey types.EncryptionKey) ([]byte, error) { + auth, err := types.NewAuthenticator(domain, cname) + if err != nil { + return nil, err + } + auth.Cksum = types.Checksum{ + CksumType: chksumtype.GSSAPI, + Checksum: krbAuth.newAuthenticatorChecksum(), + } + APReq, err := messages.NewAPReq( + ticket, + sessionKey, + auth, + ) + if err != nil { + return nil, err + } + aprBytes := make([]byte, 2) + 
binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ) + tb, err := APReq.Marshal() + if err != nil { + return nil, err + } + aprBytes = append(aprBytes, tb...) + return aprBytes, nil +} + +/* +* +* Append the GSS-API header to the payload, conforming to RFC-2743 +* Section 3.1, Mechanism-Independent Token Format +* +* https://tools.ietf.org/html/rfc2743#page-81 +* +* GSSAPIHeader + +* + */ +func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) { + oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID()) + if err != nil { + return nil, err + } + tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload)) + GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...) + GSSHeader = append(GSSHeader, oidBytes...) + GSSPackage := append(GSSHeader, payload...) + return GSSPackage, nil +} + +func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) { + switch krbAuth.step { + case GSS_API_INITIAL: + aprBytes, err := krbAuth.createKrb5Token( + kerberosClient.Domain(), + kerberosClient.CName(), + krbAuth.ticket, + krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_VERIFY + return krbAuth.appendGSSAPIHeader(aprBytes) + case GSS_API_VERIFY: + wrapTokenReq := gssapi.WrapToken{} + if err := wrapTokenReq.Unmarshal(bytes, true); err != nil { + return nil, err + } + // Validate response. 
+ isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL) + if !isValid { + return nil, err + } + + wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_FINISH + return wrapTokenResponse.Marshal() + } + return nil, nil +} + +/* This does the handshake for authorization */ +func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { + kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config) + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + + err = kerberosClient.Login() + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + // Construct SPN using serviceName and host + // SPN format: / + + host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part + spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) + + ticket, encKey, err := kerberosClient.GetServiceTicket(spn) + + if err != nil { + Logger.Printf("Error getting Kerberos service ticket : %s", err) + return err + } + krbAuth.ticket = ticket + krbAuth.encKey = encKey + krbAuth.step = GSS_API_INITIAL + var receivedBytes []byte = nil + defer kerberosClient.Destroy() + for { + packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + requestTime := time.Now() + bytesWritten, err := krbAuth.writePackage(broker, packBytes) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + broker.updateOutgoingCommunicationMetrics(bytesWritten) + if krbAuth.step == GSS_API_VERIFY { + bytesRead := 0 + receivedBytes, bytesRead, err = krbAuth.readPackage(broker) + requestLatency := time.Since(requestTime) + broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + if err != nil { 
+ Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + } else if krbAuth.step == GSS_API_FINISH { + return nil + } + } +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go index ce49c473..e9d9af19 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -42,6 +42,10 @@ func (r *HeartbeatRequest) version() int16 { return 0 } +func (r *HeartbeatRequest) headerVersion() int16 { + return 1 +} + func (r *HeartbeatRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request_test.go b/vendor/github.com/Shopify/sarama/heartbeat_request_test.go deleted file mode 100644 index da6cd18f..00000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_request_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "testing" - -var ( - basicHeartbeatRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Group ID - 0x00, 0x01, 0x02, 0x03, // Generatiuon ID - 0, 3, 'b', 'a', 'z', // Member ID - } -) - -func TestHeartbeatRequest(t *testing.T) { - var request *HeartbeatRequest - - request = new(HeartbeatRequest) - request.GroupId = "foo" - request.GenerationId = 66051 - request.MemberId = "baz" - testRequest(t, "basic", request, basicHeartbeatRequest) -} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go index 766f5fde..577ab72e 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -27,6 +27,10 @@ func (r *HeartbeatResponse) version() int16 { return 0 } +func (r *HeartbeatResponse) headerVersion() int16 { + return 0 +} + func (r *HeartbeatResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response_test.go 
b/vendor/github.com/Shopify/sarama/heartbeat_response_test.go deleted file mode 100644 index 5bcbec98..00000000 --- a/vendor/github.com/Shopify/sarama/heartbeat_response_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package sarama - -import "testing" - -var ( - heartbeatResponseNoError = []byte{ - 0x00, 0x00} -) - -func TestHeartbeatResponse(t *testing.T) { - var response *HeartbeatResponse - - response = new(HeartbeatResponse) - testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } -} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go index 8ceb6c23..68944439 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -38,6 +38,10 @@ func (i *InitProducerIDRequest) version() int16 { return 0 } +func (i *InitProducerIDRequest) headerVersion() int16 { + return 1 +} + func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request_test.go b/vendor/github.com/Shopify/sarama/init_producer_id_request_test.go deleted file mode 100644 index 5c83d851..00000000 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - initProducerIDRequestNull = []byte{ - 255, 255, - 0, 0, 0, 100, - } - - initProducerIDRequest = []byte{ - 0, 3, 't', 'x', 'n', - 0, 0, 0, 100, - } -) - -func TestInitProducerIDRequest(t *testing.T) { - req := &InitProducerIDRequest{ - TransactionTimeout: 100 * time.Millisecond, - } - - testRequest(t, "null transaction id", req, initProducerIDRequestNull) - - transactionID := "txn" - req.TransactionalID = &transactionID - - testRequest(t, "transaction id", req, 
initProducerIDRequest) -} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go index 1b32eb08..3e1242bf 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -50,6 +50,10 @@ func (i *InitProducerIDResponse) version() int16 { return 0 } +func (i *InitProducerIDResponse) headerVersion() int16 { + return 0 +} + func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response_test.go b/vendor/github.com/Shopify/sarama/init_producer_id_response_test.go deleted file mode 100644 index b0649386..00000000 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - initProducerIDResponse = []byte{ - 0, 0, 0, 100, - 0, 0, - 0, 0, 0, 0, 0, 0, 31, 64, // producerID = 8000 - 0, 0, // epoch - } - - initProducerIDRequestError = []byte{ - 0, 0, 0, 100, - 0, 51, - 255, 255, 255, 255, 255, 255, 255, 255, - 0, 0, - } -) - -func TestInitProducerIDResponse(t *testing.T) { - resp := &InitProducerIDResponse{ - ThrottleTime: 100 * time.Millisecond, - ProducerID: 8000, - ProducerEpoch: 0, - } - - testResponse(t, "", resp, initProducerIDResponse) - - resp.Err = ErrConcurrentTransactions - resp.ProducerID = -1 - - testResponse(t, "with error", resp, initProducerIDRequestError) -} diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go new file mode 100644 index 00000000..d0d33e52 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/interceptors.go @@ -0,0 +1,43 @@ +package sarama + +// ProducerInterceptor allows you to intercept (and possibly mutate) the records +// received by the producer before they are published to the Kafka cluster. 
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation +type ProducerInterceptor interface { + + // OnSend is called when the producer message is intercepted. Please avoid + // modifying the message until it's safe to do so, as this is _not_ a copy + // of the message. + OnSend(*ProducerMessage) +} + +// ConsumerInterceptor allows you to intercept (and possibly mutate) the records +// received by the consumer before they are sent to the messages channel. +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation +type ConsumerInterceptor interface { + + // OnConsume is called when the consumed message is intercepted. Please + // avoid modifying the message until it's safe to do so, as this is _not_ a + // copy of the message. + OnConsume(*ConsumerMessage) +} + +func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) { + defer func() { + if r := recover(); r != nil { + Logger.Printf("Error when calling producer interceptor: %s, %w\n", interceptor, r) + } + }() + + interceptor.OnSend(msg) +} + +func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) { + defer func() { + if r := recover(); r != nil { + Logger.Printf("Error when calling consumer interceptor: %s, %w\n", interceptor, r) + } + }() + + interceptor.OnConsume(msg) +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go index 97e9299e..3734e82e 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -134,6 +134,10 @@ func (r *JoinGroupRequest) version() int16 { return r.Version } +func (r *JoinGroupRequest) headerVersion() int16 { + return 1 +} + func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch 
r.Version { case 2: diff --git a/vendor/github.com/Shopify/sarama/join_group_request_test.go b/vendor/github.com/Shopify/sarama/join_group_request_test.go deleted file mode 100644 index a2e17f98..00000000 --- a/vendor/github.com/Shopify/sarama/join_group_request_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package sarama - -import "testing" - -var ( - joinGroupRequestV0_NoProtocols = []byte{ - 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID - 0, 0, 0, 100, // Session timeout - 0, 0, // Member ID - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type - 0, 0, 0, 0, // 0 protocol groups - } - - joinGroupRequestV0_OneProtocol = []byte{ - 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID - 0, 0, 0, 100, // Session timeout - 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type - 0, 0, 0, 1, // 1 group protocol - 0, 3, 'o', 'n', 'e', // Protocol name - 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata - } - - joinGroupRequestV1 = []byte{ - 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID - 0, 0, 0, 100, // Session timeout - 0, 0, 0, 200, // Rebalance timeout - 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type - 0, 0, 0, 1, // 1 group protocol - 0, 3, 'o', 'n', 'e', // Protocol name - 0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata - } -) - -func TestJoinGroupRequest(t *testing.T) { - request := new(JoinGroupRequest) - request.GroupId = "TestGroup" - request.SessionTimeout = 100 - request.ProtocolType = "consumer" - testRequest(t, "V0: no protocols", request, joinGroupRequestV0_NoProtocols) -} - -func TestJoinGroupRequestV0_OneProtocol(t *testing.T) { - request := new(JoinGroupRequest) - request.GroupId = "TestGroup" - request.SessionTimeout = 100 - request.MemberId = "OneProtocol" - request.ProtocolType = "consumer" - 
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) - packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol) - request.GroupProtocols = make(map[string][]byte) - request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03} - testRequestDecode(t, "V0: one protocol", request, packet) -} - -func TestJoinGroupRequestDeprecatedEncode(t *testing.T) { - request := new(JoinGroupRequest) - request.GroupId = "TestGroup" - request.SessionTimeout = 100 - request.MemberId = "OneProtocol" - request.ProtocolType = "consumer" - request.GroupProtocols = make(map[string][]byte) - request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03} - packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol) - request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) - testRequestDecode(t, "V0: one protocol", request, packet) -} - -func TestJoinGroupRequestV1(t *testing.T) { - request := new(JoinGroupRequest) - request.Version = 1 - request.GroupId = "TestGroup" - request.SessionTimeout = 100 - request.RebalanceTimeout = 200 - request.MemberId = "OneProtocol" - request.ProtocolType = "consumer" - request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) - packet := testRequestEncode(t, "V1", request, joinGroupRequestV1) - request.GroupProtocols = make(map[string][]byte) - request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03} - testRequestDecode(t, "V1", request, packet) -} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go index 5752acc8..54b0a45c 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -123,6 +123,10 @@ func (r *JoinGroupResponse) version() int16 { return r.Version } +func (r *JoinGroupResponse) headerVersion() int16 { + return 0 +} + func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { case 2: diff --git 
a/vendor/github.com/Shopify/sarama/join_group_response_test.go b/vendor/github.com/Shopify/sarama/join_group_response_test.go deleted file mode 100644 index a43b37a9..00000000 --- a/vendor/github.com/Shopify/sarama/join_group_response_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -var ( - joinGroupResponseV0_NoError = []byte{ - 0x00, 0x00, // No error - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen - 0, 3, 'f', 'o', 'o', // Leader ID - 0, 3, 'b', 'a', 'r', // Member ID - 0, 0, 0, 0, // No member info - } - - joinGroupResponseV0_WithError = []byte{ - 0, 23, // Error: inconsistent group protocol - 0x00, 0x00, 0x00, 0x00, // Generation ID - 0, 0, // Protocol name chosen - 0, 0, // Leader ID - 0, 0, // Member ID - 0, 0, 0, 0, // No member info - } - - joinGroupResponseV0_Leader = []byte{ - 0x00, 0x00, // No error - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen - 0, 3, 'f', 'o', 'o', // Leader ID - 0, 3, 'f', 'o', 'o', // Member ID == Leader ID - 0, 0, 0, 1, // 1 member - 0, 3, 'f', 'o', 'o', // Member ID - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata - } - - joinGroupResponseV1 = []byte{ - 0x00, 0x00, // No error - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen - 0, 3, 'f', 'o', 'o', // Leader ID - 0, 3, 'b', 'a', 'r', // Member ID - 0, 0, 0, 0, // No member info - } - - joinGroupResponseV2 = []byte{ - 0, 0, 0, 100, - 0x00, 0x00, // No error - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen - 0, 3, 'f', 'o', 'o', // Leader ID - 0, 3, 'b', 'a', 'r', // Member ID - 0, 0, 0, 0, // No member info - } -) - -func TestJoinGroupResponseV0(t *testing.T) { - var response *JoinGroupResponse - - response = new(JoinGroupResponse) - testVersionDecodable(t, "no error", 
response, joinGroupResponseV0_NoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: no error expected but found", response.Err) - } - if response.GenerationId != 66051 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.LeaderId != "foo" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "bar" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if len(response.Members) != 0 { - t.Error("Decoding Members failed, found:", response.Members) - } - - response = new(JoinGroupResponse) - testVersionDecodable(t, "with error", response, joinGroupResponseV0_WithError, 0) - if response.Err != ErrInconsistentGroupProtocol { - t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err) - } - if response.GenerationId != 0 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.LeaderId != "" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if len(response.Members) != 0 { - t.Error("Decoding Members failed, found:", response.Members) - } - - response = new(JoinGroupResponse) - testVersionDecodable(t, "with error", response, joinGroupResponseV0_Leader, 0) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: ErrNoError expected but found", response.Err) - } - if response.GenerationId != 66051 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.LeaderId != "foo" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "foo" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if len(response.Members) != 1 { - t.Error("Decoding Members failed, found:", response.Members) - } - if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) { - 
t.Error("Decoding foo member failed, found:", response.Members["foo"]) - } -} - -func TestJoinGroupResponseV1(t *testing.T) { - response := new(JoinGroupResponse) - testVersionDecodable(t, "no error", response, joinGroupResponseV1, 1) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: no error expected but found", response.Err) - } - if response.GenerationId != 66051 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.GroupProtocol != "protocol" { - t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol) - } - if response.LeaderId != "foo" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "bar" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if response.Version != 1 { - t.Error("Decoding Version failed, found:", response.Version) - } - if len(response.Members) != 0 { - t.Error("Decoding Members failed, found:", response.Members) - } -} - -func TestJoinGroupResponseV2(t *testing.T) { - response := new(JoinGroupResponse) - testVersionDecodable(t, "no error", response, joinGroupResponseV2, 2) - if response.ThrottleTime != 100 { - t.Error("Decoding ThrottleTime failed, found:", response.ThrottleTime) - } - if response.Err != ErrNoError { - t.Error("Decoding Err failed: no error expected but found", response.Err) - } - if response.GenerationId != 66051 { - t.Error("Decoding GenerationId failed, found:", response.GenerationId) - } - if response.GroupProtocol != "protocol" { - t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol) - } - if response.LeaderId != "foo" { - t.Error("Decoding LeaderId failed, found:", response.LeaderId) - } - if response.MemberId != "bar" { - t.Error("Decoding MemberId failed, found:", response.MemberId) - } - if response.Version != 2 { - t.Error("Decoding Version failed, found:", response.Version) - } - if len(response.Members) != 0 { - t.Error("Decoding Members failed, found:", 
response.Members) - } -} diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go new file mode 100644 index 00000000..01a53193 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/kerberos_client.go @@ -0,0 +1,46 @@ +package sarama + +import ( + krb5client "github.com/jcmturner/gokrb5/v8/client" + krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/types" +) + +type KerberosGoKrb5Client struct { + krb5client.Client +} + +func (c *KerberosGoKrb5Client) Domain() string { + return c.Credentials.Domain() +} + +func (c *KerberosGoKrb5Client) CName() types.PrincipalName { + return c.Credentials.CName() +} + +// NewKerberosClient creates kerberos client used to obtain TGT and TGS tokens. +// It uses pure go Kerberos 5 solution (RFC-4121 and RFC-4120). +// uses gokrb5 library underlying which is a pure go kerberos client with some GSS-API capabilities. +func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { + cfg, err := krb5config.Load(config.KerberosConfigPath) + if err != nil { + return nil, err + } + return createClient(config, cfg) +} + +func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { + var client *krb5client.Client + if config.AuthType == KRB5_KEYTAB_AUTH { + kt, err := keytab.Load(config.KeyTabPath) + if err != nil { + return nil, err + } + client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + } else { + client = krb5client.NewWithPassword(config.Username, + config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + } + return &KerberosGoKrb5Client{*client}, nil +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go index e1774274..d7789b68 100644 --- 
a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -35,6 +35,10 @@ func (r *LeaveGroupRequest) version() int16 { return 0 } +func (r *LeaveGroupRequest) headerVersion() int16 { + return 1 +} + func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_request_test.go b/vendor/github.com/Shopify/sarama/leave_group_request_test.go deleted file mode 100644 index c1fed6d2..00000000 --- a/vendor/github.com/Shopify/sarama/leave_group_request_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package sarama - -import "testing" - -var ( - basicLeaveGroupRequest = []byte{ - 0, 3, 'f', 'o', 'o', - 0, 3, 'b', 'a', 'r', - } -) - -func TestLeaveGroupRequest(t *testing.T) { - var request *LeaveGroupRequest - - request = new(LeaveGroupRequest) - request.GroupId = "foo" - request.MemberId = "bar" - testRequest(t, "basic", request, basicLeaveGroupRequest) -} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go index d60c626d..25f8d5eb 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -27,6 +27,10 @@ func (r *LeaveGroupResponse) version() int16 { return 0 } +func (r *LeaveGroupResponse) headerVersion() int16 { + return 0 +} + func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response_test.go b/vendor/github.com/Shopify/sarama/leave_group_response_test.go deleted file mode 100644 index 9207c666..00000000 --- a/vendor/github.com/Shopify/sarama/leave_group_response_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -import "testing" - -var ( - leaveGroupResponseNoError = []byte{0x00, 0x00} - leaveGroupResponseWithError = []byte{0, 25} -) - -func TestLeaveGroupResponse(t *testing.T) { - var response 
*LeaveGroupResponse - - response = new(LeaveGroupResponse) - testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } - - response = new(LeaveGroupResponse) - testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0) - if response.Err != ErrUnknownMemberId { - t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err) - } -} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go index da199a70..7d864f6b 100644 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -1,6 +1,9 @@ package sarama -import "encoding/binary" +import ( + "encoding/binary" + "sync" +) // LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. type lengthField struct { @@ -8,6 +11,20 @@ type lengthField struct { length int32 } +var lengthFieldPool = sync.Pool{} + +func acquireLengthField() *lengthField { + val := lengthFieldPool.Get() + if val != nil { + return val.(*lengthField) + } + return &lengthField{} +} + +func releaseLengthField(m *lengthField) { + lengthFieldPool.Put(m) +} + func (l *lengthField) decode(pd packetDecoder) error { var err error l.length, err = pd.getInt32() diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go index 3b16abf7..ed44cc27 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -19,6 +19,10 @@ func (r *ListGroupsRequest) version() int16 { return 0 } +func (r *ListGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *ListGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_groups_request_test.go 
b/vendor/github.com/Shopify/sarama/list_groups_request_test.go deleted file mode 100644 index 2e977d9a..00000000 --- a/vendor/github.com/Shopify/sarama/list_groups_request_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package sarama - -import "testing" - -func TestListGroupsRequest(t *testing.T) { - testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{}) -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go index 56115d4c..777bae7e 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -64,6 +64,10 @@ func (r *ListGroupsResponse) version() int16 { return 0 } +func (r *ListGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *ListGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_groups_response_test.go b/vendor/github.com/Shopify/sarama/list_groups_response_test.go deleted file mode 100644 index 41ab822f..00000000 --- a/vendor/github.com/Shopify/sarama/list_groups_response_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - listGroupsResponseEmpty = []byte{ - 0, 0, // no error - 0, 0, 0, 0, // no groups - } - - listGroupsResponseError = []byte{ - 0, 31, // no error - 0, 0, 0, 0, // ErrClusterAuthorizationFailed - } - - listGroupsResponseWithConsumer = []byte{ - 0, 0, // no error - 0, 0, 0, 1, // 1 group - 0, 3, 'f', 'o', 'o', // group name - 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type - } -) - -func TestListGroupsResponse(t *testing.T) { - var response *ListGroupsResponse - - response = new(ListGroupsResponse) - testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0) - if response.Err != ErrNoError { - t.Error("Expected no gerror, found:", response.Err) - } - if len(response.Groups) != 0 { - t.Error("Expected no groups") - } - - response = 
new(ListGroupsResponse) - testVersionDecodable(t, "no error", response, listGroupsResponseError, 0) - if response.Err != ErrClusterAuthorizationFailed { - t.Error("Expected no gerror, found:", response.Err) - } - if len(response.Groups) != 0 { - t.Error("Expected no groups") - } - - response = new(ListGroupsResponse) - testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0) - if response.Err != ErrNoError { - t.Error("Expected no gerror, found:", response.Err) - } - if len(response.Groups) != 1 { - t.Error("Expected one group") - } - if response.Groups["foo"] != "consumer" { - t.Error("Expected foo group to use consumer protocol") - } -} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go new file mode 100644 index 00000000..c1ffa9ba --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go @@ -0,0 +1,98 @@ +package sarama + +type ListPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string][]int32 + Version int16 +} + +func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return 
err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.blocks[topic][j] = partition + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *ListPartitionReassignmentsRequest) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { + if r.blocks == nil { + r.blocks = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = partitionIDs + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go new file mode 100644 index 00000000..4baa6a08 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go @@ -0,0 +1,169 @@ +package sarama + +type PartitionReplicaReassignmentsStatus struct { + Replicas []int32 + AddingReplicas []int32 + RemovingReplicas []int32 +} + +func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { + if err := pe.putCompactInt32Array(b.Replicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { + if 
b.Replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return err +} + +type ListPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus +} + +func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { + if r.TopicStatus == nil { + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) + } + partitions := r.TopicStatus[topic] + if partitions == nil { + partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) + r.TopicStatus[topic] = partitions + } + + partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} +} + +func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.TopicStatus)) + for topic, partitions := range r.TopicStatus { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + 
return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + + block := &PartitionReplicaReassignmentsStatus{} + if err := block.decode(pd); err != nil { + return err + } + r.TopicStatus[topic][partition] = block + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ListPartitionReassignmentsResponse) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index fecdbfde..e48566b3 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -1,46 +1,52 @@ package sarama import ( - "bytes" - "compress/gzip" "fmt" - "io/ioutil" "time" +) - "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" +const ( + //CompressionNone no compression + CompressionNone CompressionCodec = 
iota + //CompressionGZIP compression using GZIP + CompressionGZIP + //CompressionSnappy compression using snappy + CompressionSnappy + //CompressionLZ4 compression using LZ4 + CompressionLZ4 + //CompressionZSTD compression using ZSTD + CompressionZSTD + + // The lowest 3 bits contain the compression codec used for the message + compressionCodecMask int8 = 0x07 + + // Bit 3 set for "LogAppend" timestamps + timestampTypeMask = 0x08 + + // CompressionLevelDefault is the constant to use in CompressionLevel + // to have the default compression level for any codec. The value is picked + // that we don't use any existing compression levels. + CompressionLevelDefault = -1000 ) // CompressionCodec represents the various compression codecs recognized by Kafka in messages. type CompressionCodec int8 -// only the last two bits are really used -const compressionCodecMask int8 = 0x03 - -const ( - CompressionNone CompressionCodec = 0 - CompressionGZIP CompressionCodec = 1 - CompressionSnappy CompressionCodec = 2 - CompressionLZ4 CompressionCodec = 3 -) - func (cc CompressionCodec) String() string { return []string{ "none", "gzip", "snappy", "lz4", + "zstd", }[int(cc)] } -// CompressionLevelDefault is the constant to use in CompressionLevel -// to have the default compression level for any codec. The value is picked -// that we don't use any existing compression levels. 
-const CompressionLevelDefault = -1000 - +//Message is a kafka message type type Message struct { Codec CompressionCodec // codec used to compress the message contents CompressionLevel int // compression level + LogAppendTime bool // the used timestamp is LogAppendTime Key []byte // the message key, may be nil Value []byte // the message contents Set *MessageSet // the message set a message might wrap @@ -57,6 +63,9 @@ func (m *Message) encode(pe packetEncoder) error { pe.putInt8(m.Version) attributes := int8(m.Codec) & compressionCodecMask + if m.LogAppendTime { + attributes |= timestampTypeMask + } pe.putInt8(attributes) if m.Version >= 1 { @@ -76,47 +85,11 @@ func (m *Message) encode(pe packetEncoder) error { payload = m.compressedCache m.compressedCache = nil } else if m.Value != nil { - switch m.Codec { - case CompressionNone: - payload = m.Value - case CompressionGZIP: - var buf bytes.Buffer - var writer *gzip.Writer - if m.CompressionLevel != CompressionLevelDefault { - writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel) - if err != nil { - return err - } - } else { - writer = gzip.NewWriter(&buf) - } - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionSnappy: - tmp := snappy.Encode(m.Value) - m.compressedCache = tmp - payload = m.compressedCache - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + payload, err = compress(m.Codec, m.CompressionLevel, m.Value) + if err != nil { + return err } + m.compressedCache = payload // Keep in mind the compressed payload size for metric gathering 
m.compressedSize = len(payload) } @@ -129,7 +102,10 @@ func (m *Message) encode(pe packetEncoder) error { } func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(newCRC32Field(crcIEEE)) + crc32Decoder := acquireCrc32Field(crcIEEE) + defer releaseCrc32Field(crc32Decoder) + + err = pd.push(crc32Decoder) if err != nil { return err } @@ -148,6 +124,7 @@ func (m *Message) decode(pd packetDecoder) (err error) { return err } m.Codec = CompressionCodec(attribute & compressionCodecMask) + m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask if m.Version == 1 { if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { return err } } @@ -172,50 +149,24 @@ func (m *Message) decode(pd packetDecoder) (err error) { switch m.Codec { case CompressionNone: // nothing to do - case CompressionGZIP: + default: if m.Value == nil { break } - reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + + m.Value, err = decompress(m.Codec, m.Value) if err != nil { return err } - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } if err := m.decodeSet(); err != nil { return err } - case CompressionSnappy: - if m.Value == nil { - break - } - if m.Value, err = snappy.Decode(m.Value); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionLZ4: - if m.Value == nil { - break - } - reader := lz4.NewReader(bytes.NewReader(m.Value)) - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} } return pd.pop() } -// decodes a message set from a previousy encoded bulk-message +// decodes a message set from a previously encoded bulk-message func (m *Message) decodeSet() (err error) { pd := realDecoder{raw: m.Value} m.Set = &MessageSet{} diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
index 600c7c4d..6523ec2f 100644 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -29,7 +29,10 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) { return err } - if err = pd.push(&lengthField{}); err != nil { + lengthDecoder := acquireLengthField() + defer releaseLengthField(lengthDecoder) + + if err = pd.push(lengthDecoder); err != nil { return err } diff --git a/vendor/github.com/Shopify/sarama/message_test.go b/vendor/github.com/Shopify/sarama/message_test.go deleted file mode 100644 index 0eb02f26..00000000 --- a/vendor/github.com/Shopify/sarama/message_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - emptyMessage = []byte{ - 167, 236, 104, 3, // CRC - 0x00, // magic version byte - 0x00, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0xFF, 0xFF, 0xFF, 0xFF} // value - - emptyV1Message = []byte{ - 204, 47, 121, 217, // CRC - 0x01, // magic version byte - 0x00, // attribute flags - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0xFF, 0xFF, 0xFF, 0xFF} // value - - emptyV2Message = []byte{ - 167, 236, 104, 3, // CRC - 0x02, // magic version byte - 0x00, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0xFF, 0xFF, 0xFF, 0xFF} // value - - emptyGzipMessage = []byte{ - 132, 99, 80, 148, //CRC - 0x00, // magic version byte - 0x01, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - // value - 0x00, 0x00, 0x00, 0x17, - 0x1f, 0x8b, - 0x08, - 0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0} - - emptyLZ4Message = []byte{ - 132, 219, 238, 101, // CRC - 0x01, // version byte - 0x03, // attribute flags: lz4 - 0, 0, 1, 88, 141, 205, 89, 56, // timestamp - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0x00, 0x00, 0x00, 0x0f, // len - 0x04, 0x22, 0x4D, 0x18, // LZ4 magic number - 100, // LZ4 flags: version 01, block indepedant, content checksum - 112, 185, 0, 0, 0, 0, // LZ4 data - 5, 
93, 204, 2, // LZ4 checksum - } - - emptyBulkSnappyMessage = []byte{ - 180, 47, 53, 209, //CRC - 0x00, // magic version byte - 0x02, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0, 0, 0, 42, - 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic - 0, 0, 0, 1, // min version - 0, 0, 0, 1, // default version - 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0} - - emptyBulkGzipMessage = []byte{ - 139, 160, 63, 141, //CRC - 0x00, // magic version byte - 0x01, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0x00, 0x00, 0x00, 0x27, // len - 0x1f, 0x8b, // Gzip Magic - 0x08, // deflate compressed - 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0} - - emptyBulkLZ4Message = []byte{ - 246, 12, 188, 129, // CRC - 0x01, // Version - 0x03, // attribute flags (LZ4) - 255, 255, 249, 209, 212, 181, 73, 201, // timestamp - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0x00, 0x00, 0x00, 0x47, // len - 0x04, 0x22, 0x4D, 0x18, // magic number lz4 - 100, // lz4 flags 01100100 - // version: 01, block indep: 1, block checksum: 0, content size: 0, content checksum: 1, reserved: 00 - 112, 185, 52, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, - 71, 129, 23, 111, // LZ4 checksum - } -) - -func TestMessageEncoding(t *testing.T) { - message := Message{} - testEncodable(t, "empty", &message, emptyMessage) - - message.Value = []byte{} - message.Codec = CompressionGZIP - testEncodable(t, "empty gzip", &message, emptyGzipMessage) - - message.Value = []byte{} - message.Codec = CompressionLZ4 - message.Timestamp = time.Unix(1479847795, 0) - message.Version = 1 - testEncodable(t, "empty lz4", &message, emptyLZ4Message) -} - -func TestMessageDecoding(t *testing.T) { - message := Message{} - testDecodable(t, 
"empty", &message, emptyMessage) - if message.Codec != CompressionNone { - t.Error("Decoding produced compression codec where there was none.") - } - if message.Key != nil { - t.Error("Decoding produced key where there was none.") - } - if message.Value != nil { - t.Error("Decoding produced value where there was none.") - } - if message.Set != nil { - t.Error("Decoding produced set where there was none.") - } - - testDecodable(t, "empty gzip", &message, emptyGzipMessage) - if message.Codec != CompressionGZIP { - t.Error("Decoding produced incorrect compression codec (was gzip).") - } - if message.Key != nil { - t.Error("Decoding produced key where there was none.") - } - if message.Value == nil || len(message.Value) != 0 { - t.Error("Decoding produced nil or content-ful value where there was an empty array.") - } -} - -func TestMessageDecodingBulkSnappy(t *testing.T) { - message := Message{} - testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage) - if message.Codec != CompressionSnappy { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} - -func TestMessageDecodingBulkGzip(t *testing.T) { - message := Message{} - testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage) - if message.Codec != CompressionGZIP { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding 
produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} - -func TestMessageDecodingBulkLZ4(t *testing.T) { - message := Message{} - testDecodable(t, "bulk lz4", &message, emptyBulkLZ4Message) - if message.Codec != CompressionLZ4 { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionLZ4) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} - -func TestMessageDecodingVersion1(t *testing.T) { - message := Message{Version: 1} - testDecodable(t, "decoding empty v1 message", &message, emptyV1Message) -} - -func TestMessageDecodingUnknownVersions(t *testing.T) { - message := Message{Version: 2} - err := decode(emptyV2Message, &message) - if err == nil { - t.Error("Decoding did not produce an error for an unknown magic byte") - } - if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" { - t.Error("Decoding an unknown magic byte produced an unknown error ", err) - } -} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index 17dc4289..e835f5a9 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -37,15 +37,8 @@ func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { if err != nil { return err } - if size < 0 { - return nil - } else { - topicCount := size - if topicCount == 0 { - return nil - } - - r.Topics = make([]string, topicCount) + if size > 0 { + r.Topics = make([]string, size) for i := range r.Topics { topic, err := pd.getString() if err != nil { @@ -72,6 +65,10 @@ func (r *MetadataRequest) version() int16 { return r.Version } 
+func (r *MetadataRequest) headerVersion() int16 { + return 1 +} + func (r *MetadataRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/metadata_request_test.go b/vendor/github.com/Shopify/sarama/metadata_request_test.go deleted file mode 100644 index 727e48a2..00000000 --- a/vendor/github.com/Shopify/sarama/metadata_request_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package sarama - -import "testing" - -var ( - metadataRequestNoTopicsV0 = []byte{ - 0x00, 0x00, 0x00, 0x00} - - metadataRequestOneTopicV0 = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'} - - metadataRequestThreeTopicsV0 = []byte{ - 0x00, 0x00, 0x00, 0x03, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x03, 'b', 'a', 'z'} - - metadataRequestNoTopicsV1 = []byte{ - 0xff, 0xff, 0xff, 0xff} - - metadataRequestAutoCreateV4 = append(metadataRequestOneTopicV0, byte(1)) - metadataRequestNoAutoCreateV4 = append(metadataRequestOneTopicV0, byte(0)) -) - -func TestMetadataRequestV0(t *testing.T) { - request := new(MetadataRequest) - testRequest(t, "no topics", request, metadataRequestNoTopicsV0) - - request.Topics = []string{"topic1"} - testRequest(t, "one topic", request, metadataRequestOneTopicV0) - - request.Topics = []string{"foo", "bar", "baz"} - testRequest(t, "three topics", request, metadataRequestThreeTopicsV0) -} - -func TestMetadataRequestV1(t *testing.T) { - request := new(MetadataRequest) - request.Version = 1 - testRequest(t, "no topics", request, metadataRequestNoTopicsV1) - - request.Topics = []string{"topic1"} - testRequest(t, "one topic", request, metadataRequestOneTopicV0) - - request.Topics = []string{"foo", "bar", "baz"} - testRequest(t, "three topics", request, metadataRequestThreeTopicsV0) -} - -func TestMetadataRequestV2(t *testing.T) { - request := new(MetadataRequest) - request.Version = 2 - testRequest(t, "no topics", request, metadataRequestNoTopicsV1) - - request.Topics = 
[]string{"topic1"} - testRequest(t, "one topic", request, metadataRequestOneTopicV0) -} - -func TestMetadataRequestV3(t *testing.T) { - request := new(MetadataRequest) - request.Version = 3 - testRequest(t, "no topics", request, metadataRequestNoTopicsV1) - - request.Topics = []string{"topic1"} - testRequest(t, "one topic", request, metadataRequestOneTopicV0) -} - -func TestMetadataRequestV4(t *testing.T) { - request := new(MetadataRequest) - request.Version = 4 - request.Topics = []string{"topic1"} - request.AllowAutoTopicCreation = true - testRequest(t, "one topic", request, metadataRequestAutoCreateV4) - - request.AllowAutoTopicCreation = false - testRequest(t, "one topic", request, metadataRequestNoAutoCreateV4) -} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index c402d05f..0bb8702c 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -255,6 +255,10 @@ func (r *MetadataResponse) version() int16 { return r.Version } +func (r *MetadataResponse) headerVersion() int16 { + return 0 +} + func (r *MetadataResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: @@ -296,7 +300,7 @@ foundTopic: return tmatch } -func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { tmatch := r.AddTopic(topic, ErrNoError) var pmatch *PartitionMetadata @@ -316,6 +320,6 @@ foundPartition: pmatch.Leader = brokerID pmatch.Replicas = replicas pmatch.Isr = isr + pmatch.OfflineReplicas = offline pmatch.Err = err - } diff --git a/vendor/github.com/Shopify/sarama/metadata_response_test.go b/vendor/github.com/Shopify/sarama/metadata_response_test.go deleted file mode 100644 index 04a4ce7f..00000000 --- 
a/vendor/github.com/Shopify/sarama/metadata_response_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyMetadataResponseV0 = []byte{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - brokersNoTopicsMetadataResponseV0 = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, 0xab, 0xff, - 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', - 0x00, 0x00, 0x00, 0x33, - - 0x00, 0x01, 0x02, 0x03, - 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', - 0x00, 0x00, 0x01, 0x11, - - 0x00, 0x00, 0x00, 0x00} - - topicsNoBrokersMetadataResponseV0 = []byte{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x04, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x07, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x00, - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x00} - - brokersNoTopicsMetadataResponseV1 = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, 0xab, 0xff, - 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', - 0x00, 0x00, 0x00, 0x33, - 0x00, 0x05, 'r', 'a', 'c', 'k', '0', - - 0x00, 0x01, 0x02, 0x03, - 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', - 0x00, 0x00, 0x01, 0x11, - 0x00, 0x05, 'r', 'a', 'c', 'k', '1', - - 0x00, 0x00, 0x00, 0x01, - - 0x00, 0x00, 0x00, 0x00} - - topicsNoBrokersMetadataResponseV1 = []byte{ - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x00, 0x00, 0x04, - - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x04, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x07, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x00, - 0x00, 0x03, 'b', 'a', 'r', - 0x01, - 0x00, 0x00, 0x00, 0x00} - - noBrokersNoTopicsWithThrottleTimeAndClusterIDV3 = []byte{ - 0x00, 0x00, 0x00, 0x10, - 
0x00, 0x00, 0x00, 0x00, - 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00} - - noBrokersOneTopicWithOfflineReplicasV5 = []byte{ - 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd', - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x04, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x07, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, - } -) - -func TestEmptyMetadataResponseV0(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "empty, V0", &response, emptyMetadataResponseV0, 0) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "topics where there were none!") - } -} - -func TestMetadataResponseWithBrokersV0(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "brokers, no topics, V0", &response, brokersNoTopicsMetadataResponseV0, 0) - if len(response.Brokers) != 2 { - t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!") - } - - if response.Brokers[0].id != 0xabff { - t.Error("Decoding produced invalid broker 0 id.") - } - if response.Brokers[0].addr != "localhost:51" { - t.Error("Decoding produced invalid broker 0 address.") - } - if response.Brokers[1].id != 0x010203 { - t.Error("Decoding produced invalid broker 1 id.") - } - if response.Brokers[1].addr != "google.com:273" { - t.Error("Decoding produced invalid broker 1 address.") - } - - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "topics where there were none!") 
- } -} - -func TestMetadataResponseWithTopicsV0(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "topics, no brokers, V0", &response, topicsNoBrokersMetadataResponseV0, 0) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - - if len(response.Topics) != 2 { - t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!") - } - - if response.Topics[0].Err != ErrNoError { - t.Error("Decoding produced invalid topic 0 error.") - } - - if response.Topics[0].Name != "foo" { - t.Error("Decoding produced invalid topic 0 name.") - } - - if len(response.Topics[0].Partitions) != 1 { - t.Fatal("Decoding produced invalid partition count for topic 0.") - } - - if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize { - t.Error("Decoding produced invalid topic 0 partition 0 error.") - } - - if response.Topics[0].Partitions[0].ID != 0x01 { - t.Error("Decoding produced invalid topic 0 partition 0 id.") - } - - if response.Topics[0].Partitions[0].Leader != 0x07 { - t.Error("Decoding produced invalid topic 0 partition 0 leader.") - } - - if len(response.Topics[0].Partitions[0].Replicas) != 3 { - t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.") - } - for i := 0; i < 3; i++ { - if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) { - t.Error("Decoding produced invalid topic 0 partition 0 replica", i) - } - } - - if len(response.Topics[0].Partitions[0].Isr) != 0 { - t.Error("Decoding produced invalid topic 0 partition 0 isr length.") - } - - if response.Topics[1].Err != ErrNoError { - t.Error("Decoding produced invalid topic 1 error.") - } - - if response.Topics[1].Name != "bar" { - t.Error("Decoding produced invalid topic 0 name.") - } - - if len(response.Topics[1].Partitions) != 0 { - t.Error("Decoding produced invalid partition count for topic 1.") - } -} - -func TestMetadataResponseWithBrokersV1(t *testing.T) { - response := 
MetadataResponse{} - - testVersionDecodable(t, "topics, V1", &response, brokersNoTopicsMetadataResponseV1, 1) - if len(response.Brokers) != 2 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were 2!") - } - if response.Brokers[0].rack == nil || *response.Brokers[0].rack != "rack0" { - t.Error("Decoding produced invalid broker 0 rack.") - } - if response.Brokers[1].rack == nil || *response.Brokers[1].rack != "rack1" { - t.Error("Decoding produced invalid broker 1 rack.") - } - if response.ControllerID != 1 { - t.Error("Decoding produced", response.ControllerID, "should have been 1!") - } - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } -} - -func TestMetadataResponseWithTopicsV1(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "topics, V1", &response, topicsNoBrokersMetadataResponseV1, 1) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - if response.ControllerID != 4 { - t.Error("Decoding produced", response.ControllerID, "should have been 4!") - } - if len(response.Topics) != 2 { - t.Error("Decoding produced", len(response.Topics), "topics where there were 2!") - } - if response.Topics[0].IsInternal { - t.Error("Decoding produced", response.Topics[0], "topic0 should have been false!") - } - if !response.Topics[1].IsInternal { - t.Error("Decoding produced", response.Topics[1], "topic1 should have been true!") - } -} - -func TestMetadataResponseWithThrottleTime(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "no topics, no brokers, throttle time and cluster Id V3", &response, noBrokersNoTopicsWithThrottleTimeAndClusterIDV3, 3) - if response.ThrottleTimeMs != int32(16) { - t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 16!") - } - if len(response.Brokers) != 0 { - t.Error("Decoding produced", 
response.Brokers, "should have been 0!") - } - if response.ControllerID != int32(1) { - t.Error("Decoding produced", response.ControllerID, "should have been 1!") - } - if *response.ClusterID != "clusterId" { - t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") - } - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "should have been 0!") - } -} - -func TestMetadataResponseWithOfflineReplicasV5(t *testing.T) { - response := MetadataResponse{} - - testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, noBrokersOneTopicWithOfflineReplicasV5, 5) - if response.ThrottleTimeMs != int32(5) { - t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 5!") - } - if len(response.Brokers) != 0 { - t.Error("Decoding produced", response.Brokers, "should have been 0!") - } - if response.ControllerID != int32(2) { - t.Error("Decoding produced", response.ControllerID, "should have been 21!") - } - if *response.ClusterID != "clusterId" { - t.Error("Decoding produced", response.ClusterID, "should have been clusterId!") - } - if len(response.Topics) != 1 { - t.Error("Decoding produced", len(response.Topics), "should have been 1!") - } - if len(response.Topics[0].Partitions[0].OfflineReplicas) != 1 { - t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 1!") - } -} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go index 4869708e..90e5a87f 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -28,14 +28,6 @@ func getMetricNameForBroker(name string, broker *Broker) string { return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) } -func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { - return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) -} - -func 
getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { - return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) -} - func getMetricNameForTopic(name string, topic string) string { // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy // cf. KAFKA-1902 and KAFKA-2337 diff --git a/vendor/github.com/Shopify/sarama/metrics_test.go b/vendor/github.com/Shopify/sarama/metrics_test.go deleted file mode 100644 index 789c0ff3..00000000 --- a/vendor/github.com/Shopify/sarama/metrics_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package sarama - -import ( - "testing" - - "github.com/rcrowley/go-metrics" -) - -func TestGetOrRegisterHistogram(t *testing.T) { - metricRegistry := metrics.NewRegistry() - histogram := getOrRegisterHistogram("name", metricRegistry) - - if histogram == nil { - t.Error("Unexpected nil histogram") - } - - // Fetch the metric - foundHistogram := metricRegistry.Get("name") - - if foundHistogram != histogram { - t.Error("Unexpected different histogram", foundHistogram, histogram) - } - - // Try to register the metric again - sameHistogram := getOrRegisterHistogram("name", metricRegistry) - - if sameHistogram != histogram { - t.Error("Unexpected different histogram", sameHistogram, histogram) - } -} - -func TestGetMetricNameForBroker(t *testing.T) { - metricName := getMetricNameForBroker("name", &Broker{id: 1}) - - if metricName != "name-for-broker-1" { - t.Error("Unexpected metric name", metricName) - } -} - -// Common type and functions for metric validation -type metricValidator struct { - name string - validator func(*testing.T, interface{}) -} - -type metricValidators []*metricValidator - -func newMetricValidators() metricValidators { - return make([]*metricValidator, 0, 32) -} - -func (m *metricValidators) register(validator *metricValidator) { - *m = append(*m, validator) -} - -func (m *metricValidators) registerForBroker(broker *Broker, validator 
*metricValidator) { - m.register(&metricValidator{getMetricNameForBroker(validator.name, broker), validator.validator}) -} - -func (m *metricValidators) registerForGlobalAndTopic(topic string, validator *metricValidator) { - m.register(&metricValidator{validator.name, validator.validator}) - m.register(&metricValidator{getMetricNameForTopic(validator.name, topic), validator.validator}) -} - -func (m *metricValidators) registerForAllBrokers(broker *Broker, validator *metricValidator) { - m.register(validator) - m.registerForBroker(broker, validator) -} - -func (m metricValidators) run(t *testing.T, r metrics.Registry) { - for _, metricValidator := range m { - metric := r.Get(metricValidator.name) - if metric == nil { - t.Error("No metric named", metricValidator.name) - } else { - metricValidator.validator(t, metric) - } - } -} - -func meterValidator(name string, extraValidator func(*testing.T, metrics.Meter)) *metricValidator { - return &metricValidator{ - name: name, - validator: func(t *testing.T, metric interface{}) { - if meter, ok := metric.(metrics.Meter); !ok { - t.Errorf("Expected meter metric for '%s', got %T", name, metric) - } else { - extraValidator(t, meter) - } - }, - } -} - -func countMeterValidator(name string, expectedCount int) *metricValidator { - return meterValidator(name, func(t *testing.T, meter metrics.Meter) { - count := meter.Count() - if count != int64(expectedCount) { - t.Errorf("Expected meter metric '%s' count = %d, got %d", name, expectedCount, count) - } - }) -} - -func minCountMeterValidator(name string, minCount int) *metricValidator { - return meterValidator(name, func(t *testing.T, meter metrics.Meter) { - count := meter.Count() - if count < int64(minCount) { - t.Errorf("Expected meter metric '%s' count >= %d, got %d", name, minCount, count) - } - }) -} - -func histogramValidator(name string, extraValidator func(*testing.T, metrics.Histogram)) *metricValidator { - return &metricValidator{ - name: name, - validator: func(t 
*testing.T, metric interface{}) { - if histogram, ok := metric.(metrics.Histogram); !ok { - t.Errorf("Expected histogram metric for '%s', got %T", name, metric) - } else { - extraValidator(t, histogram) - } - }, - } -} - -func countHistogramValidator(name string, expectedCount int) *metricValidator { - return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { - count := histogram.Count() - if count != int64(expectedCount) { - t.Errorf("Expected histogram metric '%s' count = %d, got %d", name, expectedCount, count) - } - }) -} - -func minCountHistogramValidator(name string, minCount int) *metricValidator { - return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { - count := histogram.Count() - if count < int64(minCount) { - t.Errorf("Expected histogram metric '%s' count >= %d, got %d", name, minCount, count) - } - }) -} - -func minMaxHistogramValidator(name string, expectedMin int, expectedMax int) *metricValidator { - return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { - min := int(histogram.Min()) - if min != expectedMin { - t.Errorf("Expected histogram metric '%s' min = %d, got %d", name, expectedMin, min) - } - max := int(histogram.Max()) - if max != expectedMax { - t.Errorf("Expected histogram metric '%s' max = %d, got %d", name, expectedMax, max) - } - }) -} - -func minValHistogramValidator(name string, minMin int) *metricValidator { - return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { - min := int(histogram.Min()) - if min < minMin { - t.Errorf("Expected histogram metric '%s' min >= %d, got %d", name, minMin, min) - } - }) -} - -func maxValHistogramValidator(name string, maxMax int) *metricValidator { - return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) { - max := int(histogram.Max()) - if max > maxMax { - t.Errorf("Expected histogram metric '%s' max <= %d, got %d", name, maxMax, max) - } - }) -} diff --git 
a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index 55ef1e29..ff5a68ae 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -18,7 +18,9 @@ const ( expectationTimeout = 500 * time.Millisecond ) -type requestHandlerFunc func(req *request) (res encoder) +type GSSApiHandlerFunc func([]byte) []byte + +type requestHandlerFunc func(req *request) (res encoderWithHeader) // RequestNotifierFunc is invoked when a mock broker processes a request successfully // and will provides the number of bytes read and written. @@ -49,18 +51,19 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int) // It is not necessary to prefix message length or correlation ID to your // response bytes, the server does that automatically as a convenience. type MockBroker struct { - brokerID int32 - port int32 - closing chan none - stopper chan none - expectations chan encoder - listener net.Listener - t TestReporter - latency time.Duration - handler requestHandlerFunc - notifier RequestNotifierFunc - history []RequestResponse - lock sync.Mutex + brokerID int32 + port int32 + closing chan none + stopper chan none + expectations chan encoderWithHeader + listener net.Listener + t TestReporter + latency time.Duration + handler requestHandlerFunc + notifier RequestNotifierFunc + history []RequestResponse + lock sync.Mutex + gssApiHandler GSSApiHandlerFunc } // RequestResponse represents a Request/Response pair processed by MockBroker. @@ -80,7 +83,7 @@ func (b *MockBroker) SetLatency(latency time.Duration) { // and uses the found MockResponse instance to generate an appropriate reply. // If the request type is not found in the map then nothing is sent. 
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { - b.setHandler(func(req *request) (res encoder) { + b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() mockResponse := handlerMap[reqTypeName] if mockResponse == nil { @@ -173,7 +176,44 @@ func (b *MockBroker) serverLoop() { Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) } -func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { +func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) { + b.gssApiHandler = handler +} + +func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, err + } + + bytesRead += len(lengthBytes) + length := int32(binary.BigEndian.Uint32(lengthBytes)) + + if length <= 4 || length > MaxRequestSize { + return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, err + } + + bytesRead += len(encodedReq) + + fullBytes := append(lengthBytes, encodedReq...) 
+ + return fullBytes, nil +} + +func (b *MockBroker) isGSSAPI(buffer []byte) bool { + return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04}) +} + +func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) { defer wg.Done() defer func() { _ = conn.Close() @@ -191,65 +231,110 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) } }() - resHeader := make([]byte, 8) + var bytesWritten int + var bytesRead int for { - req, bytesRead, err := decodeRequest(conn) + buffer, err := b.readToBytes(conn) if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) b.serverError(err) break } - if b.latency > 0 { - time.Sleep(b.latency) - } - - b.lock.Lock() - res := b.handler(req) - b.history = append(b.history, RequestResponse{req.body, res}) - b.lock.Unlock() + bytesWritten = 0 + if !b.isGSSAPI(buffer) { + req, br, err := decodeRequest(bytes.NewReader(buffer)) + bytesRead = br + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break + } - if res == nil { - Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) - continue - } - Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + if b.latency > 0 { + time.Sleep(b.latency) + } - encodedRes, err := encode(res, nil) - if err != nil { - b.serverError(err) - break - } - if len(encodedRes) == 0 { b.lock.Lock() - if b.notifier != nil { - b.notifier(bytesRead, 0) - } + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) b.lock.Unlock() - continue - } - binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) - binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) - if _, 
err = conn.Write(resHeader); err != nil { - b.serverError(err) - break - } - if _, err = conn.Write(encodedRes); err != nil { - b.serverError(err) - break + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res, nil) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, 0) + } + b.lock.Unlock() + continue + } + + resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes))) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + bytesWritten = len(resHeader) + len(encodedRes) + } else { + // GSSAPI is not part of kafka protocol, but is supported for authentication proposes. + // Don't support history for this kind of request as is only used for test GSSAPI authentication mechanism + b.lock.Lock() + res := b.gssApiHandler(buffer) + b.lock.Unlock() + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer)) + continue + } + if _, err = conn.Write(res); err != nil { + b.serverError(err) + break + } + bytesWritten = len(res) } b.lock.Lock() if b.notifier != nil { - b.notifier(bytesRead, len(resHeader)+len(encodedRes)) + b.notifier(bytesRead, bytesWritten) } b.lock.Unlock() } Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) } -func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { +func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte { + headerLength := uint32(8) + + if headerVersion >= 1 { + headerLength = 9 + } + + resHeader := make([]byte, headerLength) + binary.BigEndian.PutUint32(resHeader, 
payloadLength+headerLength-4) + binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId)) + + if headerVersion >= 1 { + binary.PutUvarint(resHeader[8:], 0) + } + + return resHeader +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) { select { case res, ok := <-b.expectations: if !ok { @@ -304,7 +389,7 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener stopper: make(chan none), t: t, brokerID: brokerID, - expectations: make(chan encoder, 512), + expectations: make(chan encoderWithHeader, 512), listener: listener, } broker.handler = broker.defaultRequestHandler @@ -325,6 +410,6 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener return broker } -func (b *MockBroker) Returns(e encoder) { +func (b *MockBroker) Returns(e encoderWithHeader) { b.expectations <- e } diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go new file mode 100644 index 00000000..beb00e5b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockkerberos.go @@ -0,0 +1,123 @@ +package sarama + +import ( + "encoding/binary" + "encoding/hex" + + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +type KafkaGSSAPIHandler struct { + client *MockKerberosClient + badResponse bool + badKeyChecksum bool +} + +func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { + // Default payload used for verify + err := h.client.Login() // Mock client construct keys when login + if err != nil { + return nil + } + if h.badResponse { // Returns trash + return []byte{0x00, 0x00, 0x00, 0x01, 0xAD} + } + + var pack = gssapi.WrapToken{ + Flags: KRB5_USER_AUTH, + EC: 12, + RRC: 0, + SndSeqNum: 3398292281, + Payload: []byte{0x11, 0x00}, // 1100 + } + // Compute checksum + if 
h.badKeyChecksum { + pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} + } else { + err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL) + if err != nil { + return nil + } + } + + packBytes, err := pack.Marshal() + if err != nil { + return nil + } + lenBytes := len(packBytes) + response := make([]byte, lenBytes+4) + copy(response[4:], packBytes) + binary.BigEndian.PutUint32(response, uint32(lenBytes)) + return response +} + +type MockKerberosClient struct { + asRepBytes string + ASRep messages.ASRep + credentials *credentials.Credentials + mockError error + errorStage string +} + +func (c *MockKerberosClient) Login() error { + if c.errorStage == "login" && c.mockError != nil { + return c.mockError + } + c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" + + "558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" + + "4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" + + "7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" + + "d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" + + "549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" + + "2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" + + "7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" + + "997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" + + "482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" + + "03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" + + 
"331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" + + "aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" + + "da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" + + "eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885" + apRepBytes, err := hex.DecodeString(c.asRepBytes) + if err != nil { + return err + } + err = c.ASRep.Unmarshal(apRepBytes) + if err != nil { + return err + } + c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty") + _, err = c.ASRep.DecryptEncPart(c.credentials) + if err != nil { + return err + } + return nil +} + +func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + if c.errorStage == "service_ticket" && c.mockError != nil { + return messages.Ticket{}, types.EncryptionKey{}, c.mockError + } + return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil +} + +func (c *MockKerberosClient) Domain() string { + return "EXAMPLE.COM" +} +func (c *MockKerberosClient) CName() types.PrincipalName { + var p = types.PrincipalName{ + NameType: KRB5_USER_AUTH, + NameString: []string{ + "kafka", + "kafka", + }, + } + return p +} +func (c *MockKerberosClient) Destroy() { + // Do nothing. +} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index 17204419..3df1ee0a 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -2,6 +2,7 @@ package sarama import ( "fmt" + "strings" ) // TestReporter has methods matching go's testing.T to avoid importing @@ -17,20 +18,20 @@ type TestReporter interface { // allows generating a response based on a request body. MockResponses are used // to program behavior of MockBroker in tests. 
type MockResponse interface { - For(reqBody versionedDecoder) (res encoder) + For(reqBody versionedDecoder) (res encoderWithHeader) } // MockWrapper is a mock response builder that returns a particular concrete // response regardless of the actual request passed to the `For` method. type MockWrapper struct { - res encoder + res encoderWithHeader } -func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { return mw.res } -func NewMockWrapper(res encoder) *MockWrapper { +func NewMockWrapper(res encoderWithHeader) *MockWrapper { return &MockWrapper{res: res} } @@ -49,7 +50,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { switch res := res.(type) { case MockResponse: ms.responses[i] = res - case encoder: + case encoderWithHeader: ms.responses[i] = NewMockWrapper(res) default: panic(fmt.Sprintf("Unexpected response type: %T", res)) @@ -58,7 +59,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { return ms } -func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { res = mc.responses[0].For(reqBody) if len(mc.responses) > 1 { mc.responses = mc.responses[1:] @@ -66,6 +67,69 @@ func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { return res } +type MockListGroupsResponse struct { + groups map[string]string + t TestReporter +} + +func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { + return &MockListGroupsResponse{ + groups: make(map[string]string), + t: t, + } +} + +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*ListGroupsRequest) + _ = request + response := &ListGroupsResponse{ + Groups: m.groups, + } + return response +} + +func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse { + m.groups[groupID] = protocolType + 
return m +} + +type MockDescribeGroupsResponse struct { + groups map[string]*GroupDescription + t TestReporter +} + +func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse { + return &MockDescribeGroupsResponse{ + t: t, + groups: make(map[string]*GroupDescription), + } +} + +func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse { + m.groups[groupID] = description + return m +} + +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*DescribeGroupsRequest) + + response := &DescribeGroupsResponse{} + for _, requestedGroup := range request.Groups { + if group, ok := m.groups[requestedGroup]; ok { + response.Groups = append(response.Groups, group) + } else { + // Mimic real kafka - if a group doesn't exist, return + // an entry with state "Dead" + response.Groups = append(response.Groups, &GroupDescription{ + GroupId: requestedGroup, + State: "Dead", + }) + } + } + + return response +} + // MockMetadataResponse is a `MetadataResponse` builder. 
type MockMetadataResponse struct { controllerID int32 @@ -102,7 +166,7 @@ func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResp return mmr } -func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { metadataRequest := reqBody.(*MetadataRequest) metadataResponse := &MetadataResponse{ Version: metadataRequest.version(), @@ -111,17 +175,25 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { for addr, brokerID := range mmr.brokers { metadataResponse.AddBroker(addr, brokerID) } + + // Generate set of replicas + var replicas []int32 + var offlineReplicas []int32 + for _, brokerID := range mmr.brokers { + replicas = append(replicas, brokerID) + } + if len(metadataRequest.Topics) == 0 { for topic, partitions := range mmr.leaders { for partition, brokerID := range partitions { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse } for _, topic := range metadataRequest.Topics { for partition, brokerID := range mmr.leaders[topic] { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse @@ -161,7 +233,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of return mor } -func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) offsetResponse := &OffsetResponse{Version: mor.version} for topic, partitions := range offsetRequest.blocks { @@ -237,7 +309,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, 
partition int32, of return mfr } -func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) res := &FetchResponse{ Version: mfr.version, @@ -321,7 +393,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M return mr } -func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup res := &ConsumerMetadataResponse{} @@ -370,7 +442,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, return mr } -func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*FindCoordinatorRequest) res := &FindCoordinatorResponse{} var v interface{} @@ -417,7 +489,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 return mr } -func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup res := &OffsetCommitResponse{} @@ -474,7 +546,7 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE return mr } -func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) res := &ProduceResponse{ Version: mr.version, @@ -502,6 +574,7 @@ func (mr *MockProduceResponse) getError(topic string, partition int32) KError { // MockOffsetFetchResponse is a `OffsetFetchResponse` builder. 
type MockOffsetFetchResponse struct { offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + error KError t TestReporter } @@ -523,19 +596,29 @@ func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int3 partitions = make(map[int32]*OffsetFetchResponseBlock) topics[topic] = partitions } - partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse { + mr.error = kerror return mr } -func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetFetchRequest) group := req.ConsumerGroup - res := &OffsetFetchResponse{} + res := &OffsetFetchResponse{Version: req.Version} + for topic, partitions := range mr.offsets[group] { for partition, block := range partitions { res.AddBlock(topic, partition, block) } } + + if res.Version >= 2 { + res.Err = mr.error + } return res } @@ -547,12 +630,22 @@ func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { return &MockCreateTopicsResponse{t: t} } -func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateTopicsRequest) - res := &CreateTopicsResponse{} + res := &CreateTopicsResponse{ + Version: req.Version, + } res.TopicErrors = make(map[string]*TopicError) - for topic, _ := range req.TopicDetails { + for topic := range req.TopicDetails { + if res.Version >= 1 && strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create topic with reserved prefix" + res.TopicErrors[topic] = &TopicError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } res.TopicErrors[topic] = &TopicError{Err: ErrNoError} } return 
res @@ -566,7 +659,7 @@ func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { return &MockDeleteTopicsResponse{t: t} } -func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteTopicsRequest) res := &DeleteTopicsResponse{} res.TopicErrorCodes = make(map[string]KError) @@ -574,6 +667,7 @@ func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder { for _, topic := range req.Topics { res.TopicErrorCodes[topic] = ErrNoError } + res.Version = req.Version return res } @@ -585,17 +679,62 @@ func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsRespon return &MockCreatePartitionsResponse{t: t} } -func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreatePartitionsRequest) res := &CreatePartitionsResponse{} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) - for topic, _ := range req.TopicPartitions { + for topic := range req.TopicPartitions { + if strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create partition on topic with reserved prefix" + res.TopicPartitionErrors[topic] = &TopicPartitionError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} } return res } +type MockAlterPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { + return &MockAlterPartitionReassignmentsResponse{t: t} +} + +func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterPartitionReassignmentsRequest) + _ = req + res := &AlterPartitionReassignmentsResponse{} + return res 
+} + +type MockListPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { + return &MockListPartitionReassignmentsResponse{t: t} +} + +func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ListPartitionReassignmentsRequest) + _ = req + res := &ListPartitionReassignmentsResponse{} + + for topic, partitions := range req.blocks { + for _, partition := range partitions { + res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) + } + } + + return res +} + type MockDeleteRecordsResponse struct { t TestReporter } @@ -604,14 +743,14 @@ func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { return &MockDeleteRecordsResponse{t: t} } -func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteRecordsRequest) res := &DeleteRecordsResponse{} res.Topics = make(map[string]*DeleteRecordsResponseTopic) for topic, deleteRecordRequestTopic := range req.Topics { partitions := make(map[int32]*DeleteRecordsResponsePartition) - for partition, _ := range deleteRecordRequestTopic.PartitionOffsets { + for partition := range deleteRecordRequestTopic.PartitionOffsets { partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} } res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} @@ -627,20 +766,114 @@ func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse return &MockDescribeConfigsResponse{t: t} } -func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeConfigsRequest) - res := &DescribeConfigsResponse{} + res := &DescribeConfigsResponse{ + Version: 
req.Version, + } - var configEntries []*ConfigEntry - configEntries = append(configEntries, &ConfigEntry{Name: "my_topic", - Value: "my_topic", - ReadOnly: true, - Default: true, - Sensitive: false, - }) + includeSynonyms := req.Version > 0 + includeSource := req.Version > 0 for _, r := range req.Resources { - res.Resources = append(res.Resources, &ResourceResponse{Name: r.Name, Configs: configEntries}) + var configEntries []*ConfigEntry + switch r.Type { + case BrokerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "min.insync.replicas", + Value: "2", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case BrokerLoggerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "kafka.controller.KafkaController", + Value: "DEBUG", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case TopicResource: + maxMessageBytes := &ConfigEntry{Name: "max.message.bytes", + Value: "1000000", + ReadOnly: false, + Default: !includeSource, + Sensitive: false, + } + if includeSource { + maxMessageBytes.Source = SourceDefault + } + if includeSynonyms { + maxMessageBytes.Synonyms = []*ConfigSynonym{ + { + ConfigName: "max.message.bytes", + ConfigValue: "500000", + }, + } + } + retentionMs := &ConfigEntry{Name: "retention.ms", + Value: "5000", + ReadOnly: false, + Default: false, + Sensitive: false, + } + if includeSynonyms { + retentionMs.Synonyms = []*ConfigSynonym{ + { + ConfigName: "log.retention.ms", + ConfigValue: "2500", + }, + } + } + password := &ConfigEntry{Name: "password", + Value: "12345", + ReadOnly: false, + Default: false, + Sensitive: true, + } + configEntries = append( + configEntries, maxMessageBytes, retentionMs, password) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + 
}) + } + } + return res +} + +type MockDescribeConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { + return &MockDescribeConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) } return res } @@ -653,19 +886,42 @@ func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { return &MockAlterConfigsResponse{t: t} } -func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) res := &AlterConfigsResponse{} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name, - Type: TopicResource, + Type: r.Type, ErrorMsg: "", }) } return res } +type MockAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { + return &MockAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockCreateAclsResponse struct { t TestReporter } @@ -674,7 +930,7 @@ func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { return 
&MockCreateAclsResponse{t: t} } -func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) res := &CreateAclsResponse{} @@ -692,29 +948,101 @@ func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { return &MockListAclsResponse{t: t} } -func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeAclsRequest) res := &DescribeAclsResponse{} - res.Err = ErrNoError acl := &ResourceAcls{} - acl.Resource.ResourceName = *req.ResourceName + if req.ResourceName != nil { + acl.Resource.ResourceName = *req.ResourceName + } + acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter acl.Resource.ResourceType = req.ResourceType - acl.Acls = append(acl.Acls, &Acl{}) + + host := "*" + if req.Host != nil { + host = *req.Host + } + + principal := "User:test" + if req.Principal != nil { + principal = *req.Principal + } + + permissionType := req.PermissionType + if permissionType == AclPermissionAny { + permissionType = AclPermissionAllow + } + + acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal}) res.ResourceAcls = append(res.ResourceAcls, acl) + res.Version = int16(req.Version) + return res +} + +type MockSaslAuthenticateResponse struct { + t TestReporter + kerror KError + saslAuthBytes []byte +} +func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { + return &MockSaslAuthenticateResponse{t: t} +} + +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslAuthenticateResponse{} + res.Err = msar.kerror + res.SaslAuthBytes = msar.saslAuthBytes return res } +func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse { + 
msar.kerror = kerror + return msar +} + +func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse { + msar.saslAuthBytes = saslAuthBytes + return msar +} + type MockDeleteAclsResponse struct { t TestReporter } +type MockSaslHandshakeResponse struct { + enabledMechanisms []string + kerror KError + t TestReporter +} + +func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { + return &MockSaslHandshakeResponse{t: t} +} + +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslHandshakeResponse{} + res.Err = mshr.kerror + res.EnabledMechanisms = mshr.enabledMechanisms + return res +} + +func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse { + mshr.kerror = kerror + return mshr +} + +func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse { + mshr.enabledMechanisms = enabledMechanisms + return mshr +} + func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { return &MockDeleteAclsResponse{t: t} } -func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteAclsRequest) res := &DeleteAclsResponse{} @@ -723,5 +1051,219 @@ func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder { response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError}) res.FilterResponses = append(res.FilterResponses, response) } + res.Version = int16(req.Version) return res } + +type MockDeleteGroupsResponse struct { + deletedGroups []string +} + +func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse { + return &MockDeleteGroupsResponse{} +} + +func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse { + m.deletedGroups = groups + return m +} + +func (m 
*MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DeleteGroupsResponse{ + GroupErrorCodes: map[string]KError{}, + } + for _, group := range m.deletedGroups { + resp.GroupErrorCodes[group] = ErrNoError + } + return resp +} + +type MockJoinGroupResponse struct { + t TestReporter + + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse { + return &MockJoinGroupResponse{ + t: t, + Members: make(map[string][]byte), + } +} + +func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*JoinGroupRequest) + resp := &JoinGroupResponse{ + Version: req.Version, + ThrottleTime: m.ThrottleTime, + Err: m.Err, + GenerationId: m.GenerationId, + GroupProtocol: m.GroupProtocol, + LeaderId: m.LeaderId, + MemberId: m.MemberId, + Members: m.Members, + } + return resp +} + +func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse { + m.ThrottleTime = t + return m +} + +func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse { + m.Err = kerr + return m +} + +func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse { + m.GenerationId = id + return m +} + +func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse { + m.GroupProtocol = proto + return m +} + +func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse { + m.LeaderId = id + return m +} + +func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse { + m.MemberId = id + return m +} + +func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse { + bin, err := encode(meta, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member metadata: %v", err)) + } + m.Members[id] = bin + return m +} + +type 
MockLeaveGroupResponse struct { + t TestReporter + + Err KError +} + +func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { + return &MockLeaveGroupResponse{t: t} +} + +func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &LeaveGroupResponse{ + Err: m.Err, + } + return resp +} + +func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse { + m.Err = kerr + return m +} + +type MockSyncGroupResponse struct { + t TestReporter + + Err KError + MemberAssignment []byte +} + +func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { + return &MockSyncGroupResponse{t: t} +} + +func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &SyncGroupResponse{ + Err: m.Err, + MemberAssignment: m.MemberAssignment, + } + return resp +} + +func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse { + m.Err = kerr + return m +} + +func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse { + bin, err := encode(assignment, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member assignment: %v", err)) + } + m.MemberAssignment = bin + return m +} + +type MockHeartbeatResponse struct { + t TestReporter + + Err KError +} + +func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse { + return &MockHeartbeatResponse{t: t} +} + +func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &HeartbeatResponse{} + return resp +} + +func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse { + m.Err = kerr + return m +} + +type MockDescribeLogDirsResponse struct { + t TestReporter + logDirs []DescribeLogDirsResponseDirMetadata +} + +func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse { + return &MockDescribeLogDirsResponse{t: t} +} + +func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath 
string, topicPartitions map[string]int) *MockDescribeLogDirsResponse { + var topics []DescribeLogDirsResponseTopic + for topic := range topicPartitions { + var partitions []DescribeLogDirsResponsePartition + for i := 0; i < topicPartitions[topic]; i++ { + partitions = append(partitions, DescribeLogDirsResponsePartition{ + PartitionID: int32(i), + IsTemporary: false, + OffsetLag: int64(0), + Size: int64(1234), + }) + } + topics = append(topics, DescribeLogDirsResponseTopic{ + Topic: topic, + Partitions: partitions, + }) + } + logDir := DescribeLogDirsResponseDirMetadata{ + ErrorCode: ErrNoError, + Path: logDirPath, + Topics: topics, + } + m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir} + return m +} + +func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DescribeLogDirsResponse{ + LogDirs: m.logDirs, + } + return resp +} diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md deleted file mode 100644 index 55a6c2e6..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# sarama/mocks - -The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. -You can use them to test your sarama applications using dependency injection. - -The following mock objects are available: - -- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks. -- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer) -- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer) - -The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, -and the results will be reported to the `*testing.T` object you provided when creating the mock. 
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go deleted file mode 100644 index 488ef075..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/async_producer.go +++ /dev/null @@ -1,173 +0,0 @@ -package mocks - -import ( - "sync" - - "github.com/Shopify/sarama" -) - -// AsyncProducer implements sarama's Producer interface for testing purposes. -// Before you can send messages to it's Input channel, you have to set expectations -// so it knows how to handle the input; it returns an error if the number of messages -// received is bigger then the number of expectations set. You can also set a -// function in each expectation so that the message value is checked by this function -// and an error is returned if the match fails. -type AsyncProducer struct { - l sync.Mutex - t ErrorReporter - expectations []*producerExpectation - closed chan struct{} - input chan *sarama.ProducerMessage - successes chan *sarama.ProducerMessage - errors chan *sarama.ProducerError - lastOffset int64 -} - -// NewAsyncProducer instantiates a new Producer mock. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument is used to determine whether it -// should ack successes on the Successes channel. 
-func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { - if config == nil { - config = sarama.NewConfig() - } - mp := &AsyncProducer{ - t: t, - closed: make(chan struct{}, 0), - expectations: make([]*producerExpectation, 0), - input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), - successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), - errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), - } - - go func() { - defer func() { - close(mp.successes) - close(mp.errors) - close(mp.closed) - }() - - for msg := range mp.input { - mp.l.Lock() - if mp.expectations == nil || len(mp.expectations) == 0 { - mp.expectations = nil - mp.t.Errorf("No more expectation set on this mock producer to handle the input message.") - } else { - expectation := mp.expectations[0] - mp.expectations = mp.expectations[1:] - if expectation.CheckFunction != nil { - if val, err := msg.Value.Encode(); err != nil { - mp.t.Errorf("Input message encoding failed: %s", err.Error()) - mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} - } else { - err = expectation.CheckFunction(val) - if err != nil { - mp.t.Errorf("Check function returned an error: %s", err.Error()) - mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} - } - } - } - if expectation.Result == errProduceSuccess { - mp.lastOffset++ - if config.Producer.Return.Successes { - msg.Offset = mp.lastOffset - mp.successes <- msg - } - } else { - if config.Producer.Return.Errors { - mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} - } - } - } - mp.l.Unlock() - } - - mp.l.Lock() - if len(mp.expectations) > 0 { - mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) - } - mp.l.Unlock() - }() - - return mp -} - -//////////////////////////////////////////////// -// Implement Producer interface -//////////////////////////////////////////////// - -// AsyncClose corresponds with the AsyncClose method of sarama's 
Producer implementation. -// By closing a mock producer, you also tell it that no more input will be provided, so it will -// write an error to the test state if there's any remaining expectations. -func (mp *AsyncProducer) AsyncClose() { - close(mp.input) -} - -// Close corresponds with the Close method of sarama's Producer implementation. -// By closing a mock producer, you also tell it that no more input will be provided, so it will -// write an error to the test state if there's any remaining expectations. -func (mp *AsyncProducer) Close() error { - mp.AsyncClose() - <-mp.closed - return nil -} - -// Input corresponds with the Input method of sarama's Producer implementation. -// You have to set expectations on the mock producer before writing messages to the Input -// channel, so it knows how to handle them. If there is no more remaining expectations and -// a messages is written to the Input channel, the mock producer will write an error to the test -// state object. -func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { - return mp.input -} - -// Successes corresponds with the Successes method of sarama's Producer implementation. -func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { - return mp.successes -} - -// Errors corresponds with the Errors method of sarama's Producer implementation. -func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { - return mp.errors -} - -//////////////////////////////////////////////// -// Setting expectations -//////////////////////////////////////////////// - -// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message -// will be provided on the input channel. The mock producer will call the given function to check -// the message value. If an error is returned it will be made available on the Errors channel -// otherwise the mock will handle the message as if it produced successfully, i.e. 
it will make -// it available on the Successes channel if the Producer.Return.Successes setting is set to true. -func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) { - mp.l.Lock() - defer mp.l.Unlock() - mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) -} - -// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message -// will be provided on the input channel. The mock producer will first call the given function to -// check the message value. If an error is returned it will be made available on the Errors channel -// otherwise the mock will handle the message as if it failed to produce successfully. This means -// it will make a ProducerError available on the Errors channel. -func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) { - mp.l.Lock() - defer mp.l.Unlock() - mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) -} - -// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided -// on the input channel. The mock producer will handle the message as if it is produced successfully, -// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting -// is set to true. -func (mp *AsyncProducer) ExpectInputAndSucceed() { - mp.ExpectInputWithCheckerFunctionAndSucceed(nil) -} - -// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided -// on the input channel. The mock producer will handle the message as if it failed to produce -// successfully. This means it will make a ProducerError available on the Errors channel. 
-func (mp *AsyncProducer) ExpectInputAndFail(err error) { - mp.ExpectInputWithCheckerFunctionAndFail(nil, err) -} diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go deleted file mode 100644 index b5d92aad..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package mocks - -import ( - "errors" - "fmt" - "regexp" - "strings" - "testing" - - "github.com/Shopify/sarama" -) - -func generateRegexpChecker(re string) func([]byte) error { - return func(val []byte) error { - matched, err := regexp.MatchString(re, string(val)) - if err != nil { - return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error()) - } - if !matched { - return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re) - } - return nil - } -} - -type testReporterMock struct { - errors []string -} - -func newTestReporterMock() *testReporterMock { - return &testReporterMock{errors: make([]string, 0)} -} - -func (trm *testReporterMock) Errorf(format string, args ...interface{}) { - trm.errors = append(trm.errors, fmt.Sprintf(format, args...)) -} - -func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) { - var mp interface{} = &AsyncProducer{} - if _, ok := mp.(sarama.AsyncProducer); !ok { - t.Error("The mock producer should implement the sarama.Producer interface.") - } -} - -func TestProducerReturnsExpectationsToChannels(t *testing.T) { - config := sarama.NewConfig() - config.Producer.Return.Successes = true - mp := NewAsyncProducer(t, config) - - mp.ExpectInputAndSucceed() - mp.ExpectInputAndSucceed() - mp.ExpectInputAndFail(sarama.ErrOutOfBrokers) - - mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"} - mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"} - mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"} - - msg1 := <-mp.Successes() - msg2 := 
<-mp.Successes() - err1 := <-mp.Errors() - - if msg1.Topic != "test 1" { - t.Error("Expected message 1 to be returned first") - } - - if msg2.Topic != "test 2" { - t.Error("Expected message 2 to be returned second") - } - - if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers { - t.Error("Expected message 3 to be returned as error") - } - - if err := mp.Close(); err != nil { - t.Error(err) - } -} - -func TestProducerWithTooFewExpectations(t *testing.T) { - trm := newTestReporterMock() - mp := NewAsyncProducer(trm, nil) - mp.ExpectInputAndSucceed() - - mp.Input() <- &sarama.ProducerMessage{Topic: "test"} - mp.Input() <- &sarama.ProducerMessage{Topic: "test"} - - if err := mp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestProducerWithTooManyExpectations(t *testing.T) { - trm := newTestReporterMock() - mp := NewAsyncProducer(trm, nil) - mp.ExpectInputAndSucceed() - mp.ExpectInputAndFail(sarama.ErrOutOfBrokers) - - mp.Input() <- &sarama.ProducerMessage{Topic: "test"} - if err := mp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestProducerWithCheckerFunction(t *testing.T) { - trm := newTestReporterMock() - mp := NewAsyncProducer(trm, nil) - mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$")) - - mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if err := mp.Close(); err != nil { - t.Error(err) - } - - if len(mp.Errors()) != 1 { - t.Error("Expected to report an error") - } - - err1 := <-mp.Errors() - if !strings.HasPrefix(err1.Err.Error(), "No match") { - t.Error("Expected to report a value check error, found: ", err1.Err) - } -} diff --git 
a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go deleted file mode 100644 index 003d4d3e..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/consumer.go +++ /dev/null @@ -1,315 +0,0 @@ -package mocks - -import ( - "sync" - "sync/atomic" - - "github.com/Shopify/sarama" -) - -// Consumer implements sarama's Consumer interface for testing purposes. -// Before you can start consuming from this consumer, you have to register -// topic/partitions using ExpectConsumePartition, and set expectations on them. -type Consumer struct { - l sync.Mutex - t ErrorReporter - config *sarama.Config - partitionConsumers map[string]map[int32]*PartitionConsumer - metadata map[string][]int32 -} - -// NewConsumer returns a new mock Consumer instance. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument can be set to nil. -func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { - if config == nil { - config = sarama.NewConfig() - } - - c := &Consumer{ - t: t, - config: config, - partitionConsumers: make(map[string]map[int32]*PartitionConsumer), - } - return c -} - -/////////////////////////////////////////////////// -// Consumer interface implementation -/////////////////////////////////////////////////// - -// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. -// Before you can start consuming a partition, you have to set expectations on it using -// ExpectConsumePartition. You can only consume a partition once per consumer. 
-func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { - c.l.Lock() - defer c.l.Unlock() - - if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { - c.t.Errorf("No expectations set for %s/%d", topic, partition) - return nil, errOutOfExpectations - } - - pc := c.partitionConsumers[topic][partition] - if pc.consumed { - return nil, sarama.ConfigurationError("The topic/partition is already being consumed") - } - - if pc.offset != AnyOffset && pc.offset != offset { - c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset) - } - - pc.consumed = true - return pc, nil -} - -// Topics returns a list of topics, as registered with SetMetadata -func (c *Consumer) Topics() ([]string, error) { - c.l.Lock() - defer c.l.Unlock() - - if c.metadata == nil { - c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.") - return nil, sarama.ErrOutOfBrokers - } - - var result []string - for topic := range c.metadata { - result = append(result, topic) - } - return result, nil -} - -// Partitions returns the list of parititons for the given topic, as registered with SetMetadata -func (c *Consumer) Partitions(topic string) ([]int32, error) { - c.l.Lock() - defer c.l.Unlock() - - if c.metadata == nil { - c.t.Errorf("Unexpected call to Partitions. 
Initialize the mock's topic metadata with SetMetadata.") - return nil, sarama.ErrOutOfBrokers - } - if c.metadata[topic] == nil { - return nil, sarama.ErrUnknownTopicOrPartition - } - - return c.metadata[topic], nil -} - -func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { - c.l.Lock() - defer c.l.Unlock() - - hwms := make(map[string]map[int32]int64, len(c.partitionConsumers)) - for topic, partitionConsumers := range c.partitionConsumers { - hwm := make(map[int32]int64, len(partitionConsumers)) - for partition, pc := range partitionConsumers { - hwm[partition] = pc.HighWaterMarkOffset() - } - hwms[topic] = hwm - } - - return hwms -} - -// Close implements the Close method from the sarama.Consumer interface. It will close -// all registered PartitionConsumer instances. -func (c *Consumer) Close() error { - c.l.Lock() - defer c.l.Unlock() - - for _, partitions := range c.partitionConsumers { - for _, partitionConsumer := range partitions { - _ = partitionConsumer.Close() - } - } - - return nil -} - -/////////////////////////////////////////////////// -// Expectation API -/////////////////////////////////////////////////// - -// SetTopicMetadata sets the clusters topic/partition metadata, -// which will be returned by Topics() and Partitions(). -func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { - c.l.Lock() - defer c.l.Unlock() - - c.metadata = metadata -} - -// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. -// The registered PartitionConsumer will be returned, so you can set expectations -// on it using method chaining. Once a topic/partition is registered, you are -// expected to start consuming it using ConsumePartition. If that doesn't happen, -// an error will be written to the error reporter once the mock consumer is closed. 
It will -// also expect that the -func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { - c.l.Lock() - defer c.l.Unlock() - - if c.partitionConsumers[topic] == nil { - c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) - } - - if c.partitionConsumers[topic][partition] == nil { - c.partitionConsumers[topic][partition] = &PartitionConsumer{ - t: c.t, - topic: topic, - partition: partition, - offset: offset, - messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), - errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), - } - } - - return c.partitionConsumers[topic][partition] -} - -/////////////////////////////////////////////////// -// PartitionConsumer mock type -/////////////////////////////////////////////////// - -// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. -// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is -// registered first using the Consumer's ExpectConsumePartition method. Before consuming the -// Errors and Messages channel, you should specify what values will be provided on these -// channels using YieldMessage and YieldError. -type PartitionConsumer struct { - highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG - l sync.Mutex - t ErrorReporter - topic string - partition int32 - offset int64 - messages chan *sarama.ConsumerMessage - errors chan *sarama.ConsumerError - singleClose sync.Once - consumed bool - errorsShouldBeDrained bool - messagesShouldBeDrained bool -} - -/////////////////////////////////////////////////// -// PartitionConsumer interface implementation -/////////////////////////////////////////////////// - -// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface. 
-func (pc *PartitionConsumer) AsyncClose() { - pc.singleClose.Do(func() { - close(pc.messages) - close(pc.errors) - }) -} - -// Close implements the Close method from the sarama.PartitionConsumer interface. It will -// verify whether the partition consumer was actually started. -func (pc *PartitionConsumer) Close() error { - if !pc.consumed { - pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition) - return errPartitionConsumerNotStarted - } - - if pc.errorsShouldBeDrained && len(pc.errors) > 0 { - pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors)) - } - - if pc.messagesShouldBeDrained && len(pc.messages) > 0 { - pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages)) - } - - pc.AsyncClose() - - var ( - closeErr error - wg sync.WaitGroup - ) - - wg.Add(1) - go func() { - defer wg.Done() - - var errs = make(sarama.ConsumerErrors, 0) - for err := range pc.errors { - errs = append(errs, err) - } - - if len(errs) > 0 { - closeErr = errs - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for range pc.messages { - // drain - } - }() - - wg.Wait() - return closeErr -} - -// Errors implements the Errors method from the sarama.PartitionConsumer interface. -func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError { - return pc.errors -} - -// Messages implements the Messages method from the sarama.PartitionConsumer interface. 
-func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage { - return pc.messages -} - -func (pc *PartitionConsumer) HighWaterMarkOffset() int64 { - return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1 -} - -/////////////////////////////////////////////////// -// Expectation API -/////////////////////////////////////////////////// - -// YieldMessage will yield a messages Messages channel of this partition consumer -// when it is consumed. By default, the mock consumer will not verify whether this -// message was consumed from the Messages channel, because there are legitimate -// reasons forthis not to happen. ou can call ExpectMessagesDrainedOnClose so it will -// verify that the channel is empty on close. -func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) { - pc.l.Lock() - defer pc.l.Unlock() - - msg.Topic = pc.topic - msg.Partition = pc.partition - msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1) - - pc.messages <- msg -} - -// YieldError will yield an error on the Errors channel of this partition consumer -// when it is consumed. By default, the mock consumer will not verify whether this error was -// consumed from the Errors channel, because there are legitimate reasons for this -// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that -// the channel is empty on close. -func (pc *PartitionConsumer) YieldError(err error) { - pc.errors <- &sarama.ConsumerError{ - Topic: pc.topic, - Partition: pc.partition, - Err: err, - } -} - -// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer -// that the messages channel will be fully drained when Close is called. If this -// expectation is not met, an error is reported to the error reporter. 
-func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() { - pc.messagesShouldBeDrained = true -} - -// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer -// that the errors channel will be fully drained when Close is called. If this -// expectation is not met, an error is reported to the error reporter. -func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() { - pc.errorsShouldBeDrained = true -} diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go b/vendor/github.com/Shopify/sarama/mocks/consumer_test.go deleted file mode 100644 index 311cfa02..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package mocks - -import ( - "sort" - "testing" - - "github.com/Shopify/sarama" -) - -func TestMockConsumerImplementsConsumerInterface(t *testing.T) { - var c interface{} = &Consumer{} - if _, ok := c.(sarama.Consumer); !ok { - t.Error("The mock consumer should implement the sarama.Consumer interface.") - } - - var pc interface{} = &PartitionConsumer{} - if _, ok := pc.(sarama.PartitionConsumer); !ok { - t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.") - } -} - -func TestConsumerHandlesExpectations(t *testing.T) { - consumer := NewConsumer(t, nil) - defer func() { - if err := consumer.Close(); err != nil { - t.Error(err) - } - }() - - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) - consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")}) - consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")}) - - pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - 
t.Fatal(err) - } - test0_msg := <-pc_test0.Messages() - if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" { - t.Error("Message was not as expected:", test0_msg) - } - test0_err := <-pc_test0.Errors() - if test0_err.Err != sarama.ErrOutOfBrokers { - t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err) - } - - pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) - if err != nil { - t.Fatal(err) - } - test1_msg := <-pc_test1.Messages() - if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" { - t.Error("Message was not as expected:", test1_msg) - } - - pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest) - if err != nil { - t.Fatal(err) - } - other0_msg := <-pc_other0.Messages() - if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" { - t.Error("Message was not as expected:", other0_msg) - } -} - -func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) { - consumer := NewConsumer(t, nil) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) - - pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Fatal(err) - } - - select { - case <-pc.Messages(): - t.Error("Did not epxect a message on the messages channel.") - case err := <-pc.Errors(): - if err.Err != sarama.ErrOutOfBrokers { - t.Error("Expected sarama.ErrOutOfBrokers, found", err) - } - } - - errs := pc.Close().(sarama.ConsumerErrors) - if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers { - t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers") - } -} - -func TestConsumerWithoutExpectationsOnPartition(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - _, 
err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) - if err != errOutOfExpectations { - t.Error("Expected ConsumePartition to return errOutOfExpectations") - } - - if err := consumer.Close(); err != nil { - t.Error("No error expected on close, but found:", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} - -func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) - - if err := consumer.Close(); err != nil { - t.Error("No error expected on close, but found:", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} - -func TestConsumerWithWrongOffsetExpectation(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) - - _, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest) - if err != nil { - t.Error("Did not expect error, found:", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } - - if err := consumer.Close(); err != nil { - t.Error(err) - } -} - -func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) - pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) - pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) - pcmock.ExpectMessagesDrainedOnClose() - - pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Error(err) - } - - // consume first message, not second one - <-pc.Messages() - - if err := 
consumer.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} - -func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) - pcmock.YieldError(sarama.ErrInvalidMessage) - pcmock.YieldError(sarama.ErrInvalidMessage) - pcmock.ExpectErrorsDrainedOnClose() - - pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Error(err) - } - - // consume first and second error, - <-pc.Errors() - <-pc.Errors() - - if err := consumer.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 0 { - t.Errorf("Expected no expectation failures to be set on the error reporter.") - } -} - -func TestConsumerTopicMetadata(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - consumer.SetTopicMetadata(map[string][]int32{ - "test1": {0, 1, 2, 3}, - "test2": {0, 1, 2, 3, 4, 5, 6, 7}, - }) - - topics, err := consumer.Topics() - if err != nil { - t.Error(t) - } - - sortedTopics := sort.StringSlice(topics) - sortedTopics.Sort() - if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" { - t.Error("Unexpected topics returned:", sortedTopics) - } - - partitions1, err := consumer.Partitions("test1") - if err != nil { - t.Error(t) - } - - if len(partitions1) != 4 { - t.Error("Unexpected partitions returned:", len(partitions1)) - } - - partitions2, err := consumer.Partitions("test2") - if err != nil { - t.Error(t) - } - - if len(partitions2) != 8 { - t.Error("Unexpected partitions returned:", len(partitions2)) - } - - if len(trm.errors) != 0 { - t.Errorf("Expected no expectation failures to be set on the error reporter.") - } -} - -func TestConsumerUnexpectedTopicMetadata(t *testing.T) { - trm := newTestReporterMock() - consumer := 
NewConsumer(trm, nil) - - if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers { - t.Error("Expected sarama.ErrOutOfBrokers, found", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go deleted file mode 100644 index 4adb838d..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/mocks.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Package mocks provides mocks that can be used for testing applications -that use Sarama. The mock types provided by this package implement the -interfaces Sarama exports, so you can use them for dependency injection -in your tests. - -All mock instances require you to set expectations on them before you -can use them. It will determine how the mock will behave. If an -expectation is not met, it will make your test fail. - -NOTE: this package currently does not fall under the API stability -guarantee of Sarama as it is still considered experimental. -*/ -package mocks - -import ( - "errors" - - "github.com/Shopify/sarama" -) - -// ErrorReporter is a simple interface that includes the testing.T methods we use to report -// expectation violations when using the mock objects. -type ErrorReporter interface { - Errorf(string, ...interface{}) -} - -// ValueChecker is a function type to be set in each expectation of the producer mocks -// to check the value passed. 
-type ValueChecker func(val []byte) error - -var ( - errProduceSuccess error = nil - errOutOfExpectations = errors.New("No more expectations set on mock") - errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") -) - -const AnyOffset int64 = -1000 - -type producerExpectation struct { - Result error - CheckFunction ValueChecker -} - -type consumerExpectation struct { - Err error - Msg *sarama.ConsumerMessage -} diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go deleted file mode 100644 index 3f4986e2..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go +++ /dev/null @@ -1,157 +0,0 @@ -package mocks - -import ( - "sync" - - "github.com/Shopify/sarama" -) - -// SyncProducer implements sarama's SyncProducer interface for testing purposes. -// Before you can use it, you have to set expectations on the mock SyncProducer -// to tell it how to handle calls to SendMessage, so you can easily test success -// and failure scenarios. -type SyncProducer struct { - l sync.Mutex - t ErrorReporter - expectations []*producerExpectation - lastOffset int64 -} - -// NewSyncProducer instantiates a new SyncProducer mock. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument is currently unused, but is -// maintained to be compatible with the async Producer. -func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { - return &SyncProducer{ - t: t, - expectations: make([]*producerExpectation, 0), - } -} - -//////////////////////////////////////////////// -// Implement SyncProducer interface -//////////////////////////////////////////////// - -// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. 
-// You have to set expectations on the mock producer before calling SendMessage, so it knows -// how to handle them. You can set a function in each expectation so that the message value -// checked by this function and an error is returned if the match fails. -// If there is no more remaining expectation when SendMessage is called, -// the mock producer will write an error to the test state object. -func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { - sp.l.Lock() - defer sp.l.Unlock() - - if len(sp.expectations) > 0 { - expectation := sp.expectations[0] - sp.expectations = sp.expectations[1:] - if expectation.CheckFunction != nil { - val, err := msg.Value.Encode() - if err != nil { - sp.t.Errorf("Input message encoding failed: %s", err.Error()) - return -1, -1, err - } - - errCheck := expectation.CheckFunction(val) - if errCheck != nil { - sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) - return -1, -1, errCheck - } - } - if expectation.Result == errProduceSuccess { - sp.lastOffset++ - msg.Offset = sp.lastOffset - return 0, msg.Offset, nil - } - return -1, -1, expectation.Result - } - sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") - return -1, -1, errOutOfExpectations -} - -// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation. -// You have to set expectations on the mock producer before calling SendMessages, so it knows -// how to handle them. If there is no more remaining expectations when SendMessages is called, -// the mock producer will write an error to the test state object. 
-func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error { - sp.l.Lock() - defer sp.l.Unlock() - - if len(sp.expectations) >= len(msgs) { - expectations := sp.expectations[0:len(msgs)] - sp.expectations = sp.expectations[len(msgs):] - - for i, expectation := range expectations { - if expectation.CheckFunction != nil { - val, err := msgs[i].Value.Encode() - if err != nil { - sp.t.Errorf("Input message encoding failed: %s", err.Error()) - return err - } - errCheck := expectation.CheckFunction(val) - if errCheck != nil { - sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) - return errCheck - } - } - if expectation.Result != errProduceSuccess { - return expectation.Result - } - } - return nil - } - sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") - return errOutOfExpectations -} - -// Close corresponds with the Close method of sarama's SyncProducer implementation. -// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow, -// so it will write an error to the test state if there's any remaining expectations. -func (sp *SyncProducer) Close() error { - sp.l.Lock() - defer sp.l.Unlock() - - if len(sp.expectations) > 0 { - sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) - } - - return nil -} - -//////////////////////////////////////////////// -// Setting expectations -//////////////////////////////////////////////// - -// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage -// will be called. The mock producer will first call the given function to check the message value. -// It will cascade the error of the function, if any, or handle the message as if it produced -// successfully, i.e. by returning a valid partition, and offset, and a nil error. 
-func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) { - sp.l.Lock() - defer sp.l.Unlock() - sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) -} - -// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be -// called. The mock producer will first call the given function to check the message value. -// It will cascade the error of the function, if any, or handle the message as if it failed -// to produce successfully, i.e. by returning the provided error. -func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) { - sp.l.Lock() - defer sp.l.Unlock() - sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) -} - -// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be -// called. The mock producer will handle the message as if it produced successfully, i.e. by -// returning a valid partition, and offset, and a nil error. -func (sp *SyncProducer) ExpectSendMessageAndSucceed() { - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil) -} - -// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be -// called. The mock producer will handle the message as if it failed to produce -// successfully, i.e. by returning the provided error. 
-func (sp *SyncProducer) ExpectSendMessageAndFail(err error) { - sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err) -} diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go deleted file mode 100644 index bf2c71a1..00000000 --- a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package mocks - -import ( - "errors" - "strings" - "testing" - - "github.com/Shopify/sarama" -) - -func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) { - var mp interface{} = &SyncProducer{} - if _, ok := mp.(sarama.SyncProducer); !ok { - t.Error("The mock async producer should implement the sarama.SyncProducer interface.") - } -} - -func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) { - sp := NewSyncProducer(t, nil) - defer func() { - if err := sp.Close(); err != nil { - t.Error(err) - } - }() - - sp.ExpectSendMessageAndSucceed() - sp.ExpectSendMessageAndSucceed() - sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers) - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - - _, offset, err := sp.SendMessage(msg) - if err != nil { - t.Errorf("The first message should have been produced successfully, but got %s", err) - } - if offset != 1 || offset != msg.Offset { - t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset) - } - - _, offset, err = sp.SendMessage(msg) - if err != nil { - t.Errorf("The second message should have been produced successfully, but got %s", err) - } - if offset != 2 || offset != msg.Offset { - t.Errorf("The second message should have been assigned offset 2, but got %d", offset) - } - - _, _, err = sp.SendMessage(msg) - if err != sarama.ErrOutOfBrokers { - t.Errorf("The third message should not have been produced successfully") - } - - if err := sp.Close(); err != nil { - t.Error(err) - } -} - -func 
TestSyncProducerWithTooManyExpectations(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageAndSucceed() - sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers) - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if _, _, err := sp.SendMessage(msg); err != nil { - t.Error("No error expected on first SendMessage call", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestSyncProducerWithTooFewExpectations(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageAndSucceed() - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if _, _, err := sp.SendMessage(msg); err != nil { - t.Error("No error expected on first SendMessage call", err) - } - if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations { - t.Error("errOutOfExpectations expected on second SendMessage call, found:", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestSyncProducerWithCheckerFunction(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$")) - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if _, _, err := sp.SendMessage(msg); err != nil { - t.Error("No error expected on first SendMessage call, found: ", err) - } - msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") { - t.Error("Error during value check expected on second SendMessage call, found:", err) - } - 
- if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestSyncProducerWithCheckerFunctionForSendMessagesWithError(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$")) - - msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - msg2 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - msgs := []*sarama.ProducerMessage{msg1, msg2} - - if err := sp.SendMessages(msgs); err == nil || !strings.HasPrefix(err.Error(), "No match") { - t.Error("Error during value check expected on second message, found: ", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestSyncProducerWithCheckerFunctionForSendMessagesWithoutError(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - - msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - msgs := []*sarama.ProducerMessage{msg1} - - if err := sp.SendMessages(msgs); err != nil { - t.Error("No error expected on SendMessages call, found: ", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 0 { - t.Errorf("Expected to not report any errors, found: %v", trm.errors) - } -} - -func TestSyncProducerSendMessagesExpectationsMismatchTooFew(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - - msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - msg2 := 
&sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - - msgs := []*sarama.ProducerMessage{msg1, msg2} - - if err := sp.SendMessages(msgs); err == nil { - t.Error("Error during value check expected on second message, found: ", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 2 { - t.Error("Expected to report 2 errors") - } -} - -func TestSyncProducerSendMessagesExpectationsMismatchTooMany(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - - msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - msgs := []*sarama.ProducerMessage{msg1} - - if err := sp.SendMessages(msgs); err != nil { - t.Error("No error expected on SendMessages call, found: ", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report 1 errors") - } -} - -func TestSyncProducerSendMessagesFaultyEncoder(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes")) - - msg1 := &sarama.ProducerMessage{Topic: "test", Value: faultyEncoder("123")} - msgs := []*sarama.ProducerMessage{msg1} - - if err := sp.SendMessages(msgs); err == nil || !strings.HasPrefix(err.Error(), "encode error") { - t.Error("Encoding error expected, found: ", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report 1 errors") - } -} - -type faultyEncoder []byte - -func (f faultyEncoder) Encode() ([]byte, error) { - return nil, errors.New("encode error") -} - -func (f faultyEncoder) Length() int { - return len(f) -} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go 
b/vendor/github.com/Shopify/sarama/offset_commit_request.go index 37e99fbf..9931cade 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -52,12 +52,14 @@ type OffsetCommitRequest struct { // - 0 (kafka 0.8.1 and later) // - 1 (kafka 0.8.2 and later) // - 2 (kafka 0.9.0 and later) + // - 3 (kafka 0.11.0 and later) + // - 4 (kafka 2.0.0 and later) Version int16 blocks map[string]map[int32]*offsetCommitRequestBlock } func (r *OffsetCommitRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 2 { + if r.Version < 0 || r.Version > 4 { return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} } @@ -168,12 +170,20 @@ func (r *OffsetCommitRequest) version() int16 { return r.Version } +func (r *OffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_8_2_0 case 2: return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 default: return MinVersion } @@ -194,11 +204,11 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { partitions := r.blocks[topic] if partitions == nil { - return 0, "", errors.New("No such offset") + return 0, "", errors.New("no such offset") } block := partitions[partitionID] if block == nil { - return 0, "", errors.New("No such offset") + return 0, "", errors.New("no such offset") } return block.offset, block.metadata, nil } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request_test.go b/vendor/github.com/Shopify/sarama/offset_commit_request_test.go deleted file mode 100644 index afc25b7b..00000000 --- a/vendor/github.com/Shopify/sarama/offset_commit_request_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sarama - -import "testing" - -var ( - 
offsetCommitRequestNoBlocksV0 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x00} - - offsetCommitRequestNoBlocksV1 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x00} - - offsetCommitRequestNoBlocksV2 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, - 0x00, 0x00, 0x00, 0x00} - - offsetCommitRequestOneBlockV0 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x52, 0x21, - 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, - 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} - - offsetCommitRequestOneBlockV1 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x52, 0x21, - 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} - - offsetCommitRequestOneBlockV2 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x52, 0x21, - 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, - 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} -) - -func TestOffsetCommitRequestV0(t *testing.T) { - request := new(OffsetCommitRequest) - request.Version = 0 - request.ConsumerGroup = "foobar" - testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0) - - request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") - testRequest(t, "one block v0", request, 
offsetCommitRequestOneBlockV0) -} - -func TestOffsetCommitRequestV1(t *testing.T) { - request := new(OffsetCommitRequest) - request.ConsumerGroup = "foobar" - request.ConsumerID = "cons" - request.ConsumerGroupGeneration = 0x1122 - request.Version = 1 - testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1) - - request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata") - testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1) -} - -func TestOffsetCommitRequestV2(t *testing.T) { - request := new(OffsetCommitRequest) - request.ConsumerGroup = "foobar" - request.ConsumerID = "cons" - request.ConsumerGroupGeneration = 0x1122 - request.RetentionTime = 0x4433 - request.Version = 2 - testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2) - - request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") - testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2) -} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go index a4b18acd..342260ef 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -1,7 +1,9 @@ package sarama type OffsetCommitResponse struct { - Errors map[string]map[int32]KError + Version int16 + ThrottleTimeMs int32 + Errors map[string]map[int32]KError } func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { @@ -17,6 +19,9 @@ func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KE } func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } if err := pe.putArrayLength(len(r.Errors)); err != nil { return err } @@ -36,6 +41,15 @@ func (r *OffsetCommitResponse) encode(pe packetEncoder) error { } func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if 
version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + numTopics, err := pd.getArrayLength() if err != nil || numTopics == 0 { return err @@ -77,9 +91,24 @@ func (r *OffsetCommitResponse) key() int16 { } func (r *OffsetCommitResponse) version() int16 { + return r.Version +} + +func (r *OffsetCommitResponse) headerVersion() int16 { return 0 } func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { - return MinVersion + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + default: + return MinVersion + } } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response_test.go b/vendor/github.com/Shopify/sarama/offset_commit_response_test.go deleted file mode 100644 index 074ec923..00000000 --- a/vendor/github.com/Shopify/sarama/offset_commit_response_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - emptyOffsetCommitResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyOffsetCommitResponse(t *testing.T) { - response := OffsetCommitResponse{} - testResponse(t, "empty", &response, emptyOffsetCommitResponse) -} - -func TestNormalOffsetCommitResponse(t *testing.T) { - response := OffsetCommitResponse{} - response.AddError("t", 0, ErrNotLeaderForPartition) - response.Errors["m"] = make(map[int32]KError) - // The response encoded form cannot be checked for it varies due to - // unpredictable map traversal order. 
- testResponse(t, "normal", &response, nil) -} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go index 5a05014b..51e9faa3 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -1,28 +1,33 @@ package sarama type OffsetFetchRequest struct { - ConsumerGroup string Version int16 + ConsumerGroup string partitions map[string][]int32 } func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { - if r.Version < 0 || r.Version > 1 { + if r.Version < 0 || r.Version > 5 { return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} } if err = pe.putString(r.ConsumerGroup); err != nil { return err } - if err = pe.putArrayLength(len(r.partitions)); err != nil { - return err - } - for topic, partitions := range r.partitions { - if err = pe.putString(topic); err != nil { + + if r.Version >= 2 && r.partitions == nil { + pe.putInt32(-1) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { return err } - if err = pe.putInt32Array(partitions); err != nil { - return err + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } } } return nil @@ -37,7 +42,7 @@ func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) if err != nil { return err } - if partitionCount == 0 { + if (partitionCount == 0 && version < 2) || partitionCount < 0 { return nil } r.partitions = make(map[string][]int32) @@ -63,15 +68,33 @@ func (r *OffsetFetchRequest) version() int16 { return r.Version } +func (r *OffsetFetchRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + 
case 5: + return V2_1_0_0 default: return MinVersion } } +func (r *OffsetFetchRequest) ZeroPartitions() { + if r.partitions == nil && r.Version >= 2 { + r.partitions = make(map[string][]int32) + } +} + func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { if r.partitions == nil { r.partitions = make(map[string][]int32) diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go deleted file mode 100644 index 025d725c..00000000 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package sarama - -import "testing" - -var ( - offsetFetchRequestNoGroupNoPartitions = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - offsetFetchRequestNoPartitions = []byte{ - 0x00, 0x04, 'b', 'l', 'a', 'h', - 0x00, 0x00, 0x00, 0x00} - - offsetFetchRequestOnePartition = []byte{ - 0x00, 0x04, 'b', 'l', 'a', 'h', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', - 0x00, 0x00, 0x00, 0x01, - 0x4F, 0x4F, 0x4F, 0x4F} -) - -func TestOffsetFetchRequest(t *testing.T) { - request := new(OffsetFetchRequest) - testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions) - - request.ConsumerGroup = "blah" - testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions) - - request.AddPartition("topicTheFirst", 0x4F4F4F4F) - testRequest(t, "one partition", request, offsetFetchRequestOnePartition) -} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go index 11e4b1f3..9c64e070 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -1,17 +1,25 @@ package sarama type OffsetFetchResponseBlock struct { - Offset int64 - Metadata string - Err KError + Offset int64 + LeaderEpoch int32 + Metadata string + Err 
KError } -func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { b.Offset, err = pd.getInt64() if err != nil { return err } + if version >= 5 { + b.LeaderEpoch, err = pd.getInt32() + if err != nil { + return err + } + } + b.Metadata, err = pd.getString() if err != nil { return err @@ -26,9 +34,13 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { return nil } -func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt64(b.Offset) + if version >= 5 { + pe.putInt32(b.LeaderEpoch) + } + err = pe.putString(b.Metadata) if err != nil { return err @@ -40,10 +52,17 @@ func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { } type OffsetFetchResponse struct { - Blocks map[string]map[int32]*OffsetFetchResponseBlock + Version int16 + ThrottleTimeMs int32 + Blocks map[string]map[int32]*OffsetFetchResponseBlock + Err KError } func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + if err := pe.putArrayLength(len(r.Blocks)); err != nil { return err } @@ -56,53 +75,75 @@ func (r *OffsetFetchResponse) encode(pe packetEncoder) error { } for partition, block := range partitions { pe.putInt32(partition) - if err := block.encode(pe); err != nil { + if err := block.encode(pe, r.Version); err != nil { return err } } } + if r.Version >= 2 { + pe.putInt16(int16(r.Err)) + } return nil } func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil || numTopics == 0 { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } + r.Version = version - 
numBlocks, err := pd.getArrayLength() + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() if err != nil { return err } + } - if numBlocks == 0 { - r.Blocks[name] = nil - continue - } - r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() + if numTopics > 0 { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() if err != nil { return err } - block := new(OffsetFetchResponseBlock) - err = block.decode(pd) + numBlocks, err := pd.getArrayLength() if err != nil { return err } - r.Blocks[name][id] = block + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } } } + if version >= 2 { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + } + return nil } @@ -111,11 +152,28 @@ func (r *OffsetFetchResponse) key() int16 { } func (r *OffsetFetchResponse) version() int16 { + return r.Version +} + +func (r *OffsetFetchResponse) headerVersion() int16 { return 0 } func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { - return MinVersion + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: + return V2_1_0_0 + default: + return MinVersion + } } func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go deleted 
file mode 100644 index 7614ae42..00000000 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyOffsetFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyOffsetFetchResponse(t *testing.T) { - response := OffsetFetchResponse{} - testResponse(t, "empty", &response, emptyOffsetFetchResponse) -} - -func TestNormalOffsetFetchResponse(t *testing.T) { - response := OffsetFetchResponse{} - response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut}) - response.Blocks["m"] = nil - // The response encoded form cannot be checked for it varies due to - // unpredictable map traversal order. - testResponse(t, "normal", &response, nil) -} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 8ea857f8..4f480a08 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -19,6 +19,10 @@ type OffsetManager interface { // will otherwise leak memory. You must call this after all the // PartitionOffsetManagers are closed. Close() error + + // Commit commits the offsets. This method can be used if AutoCommit.Enable is + // set to false. 
+ Commit() } type offsetManager struct { @@ -58,7 +62,6 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client client: client, conf: conf, group: group, - ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval), poms: make(map[string]map[int32]*partitionOffsetManager), memberID: memberID, @@ -67,7 +70,10 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client closing: make(chan none), closed: make(chan none), } - go withRecover(om.mainLoop) + if conf.Consumer.Offsets.AutoCommit.Enable { + om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval) + go withRecover(om.mainLoop) + } return om, nil } @@ -99,16 +105,20 @@ func (om *offsetManager) Close() error { om.closeOnce.Do(func() { // exit the mainLoop close(om.closing) - <-om.closed + if om.conf.Consumer.Offsets.AutoCommit.Enable { + <-om.closed + } // mark all POMs as closed om.asyncClosePOMs() // flush one last time - for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { - om.flushToBroker() - if om.releasePOMs(false) == 0 { - break + if om.conf.Consumer.Offsets.AutoCommit.Enable { + for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { + om.flushToBroker() + if om.releasePOMs(false) == 0 { + break + } } } @@ -120,6 +130,14 @@ func (om *offsetManager) Close() error { return nil } +func (om *offsetManager) computeBackoff(retries int) time.Duration { + if om.conf.Metadata.Retry.BackoffFunc != nil { + return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max) + } else { + return om.conf.Metadata.Retry.Backoff + } +} + func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { broker, err := om.coordinator() if err != nil { @@ -161,10 +179,11 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri if retries <= 0 { return 0, "", block.Err } + backoff := om.computeBackoff(retries) select { case 
<-om.closing: return 0, "", block.Err - case <-time.After(om.conf.Metadata.Retry.Backoff): + case <-time.After(backoff): } return om.fetchInitialOffset(topic, partition, retries-1) default: @@ -216,14 +235,18 @@ func (om *offsetManager) mainLoop() { for { select { case <-om.ticker.C: - om.flushToBroker() - om.releasePOMs(false) + om.Commit() case <-om.closing: return } } } +func (om *offsetManager) Commit() { + om.flushToBroker() + om.releasePOMs(false) +} + func (om *offsetManager) flushToBroker() { req := om.constructRequest() if req == nil { @@ -266,7 +289,6 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { ConsumerID: om.memberID, ConsumerGroupGeneration: om.generation, } - } om.pomsLock.RLock() @@ -324,7 +346,6 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest pom.handleError(err) case ErrOffsetsLoadInProgress: // nothing wrong but we didn't commit, we'll get it next time round - break case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the @@ -567,6 +588,6 @@ func (pom *partitionOffsetManager) handleError(err error) { func (pom *partitionOffsetManager) release() { pom.releaseOnce.Do(func() { - go close(pom.errors) + close(pom.errors) }) } diff --git a/vendor/github.com/Shopify/sarama/offset_manager_test.go b/vendor/github.com/Shopify/sarama/offset_manager_test.go deleted file mode 100644 index 86d6f4eb..00000000 --- a/vendor/github.com/Shopify/sarama/offset_manager_test.go +++ /dev/null @@ -1,421 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -func initOffsetManager(t *testing.T, retention time.Duration) (om OffsetManager, - testClient Client, broker, coordinator *MockBroker) { - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond - config.Version = V0_9_0_0 - if retention > 0 { - 
config.Consumer.Offsets.Retention = retention - } - - broker = NewMockBroker(t, 1) - coordinator = NewMockBroker(t, 2) - - seedMeta := new(MetadataResponse) - seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID()) - seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError) - seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, ErrNoError) - broker.Returns(seedMeta) - - var err error - testClient, err = NewClient([]string{broker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: coordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: coordinator.Port(), - }) - - om, err = NewOffsetManagerFromClient("group", testClient) - if err != nil { - t.Fatal(err) - } - - return om, testClient, broker, coordinator -} - -func initPartitionOffsetManager(t *testing.T, om OffsetManager, - coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager { - - fetchResponse := new(OffsetFetchResponse) - fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{ - Err: ErrNoError, - Offset: initialOffset, - Metadata: metadata, - }) - coordinator.Returns(fetchResponse) - - pom, err := om.ManagePartition("my_topic", 0) - if err != nil { - t.Fatal(err) - } - - return pom -} - -func TestNewOffsetManager(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - seedBroker.Returns(new(MetadataResponse)) - defer seedBroker.Close() - - testClient, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - om, err := NewOffsetManagerFromClient("group", testClient) - if err != nil { - t.Error(err) - } - safeClose(t, om) - safeClose(t, testClient) - - _, err = NewOffsetManagerFromClient("group", testClient) - if err != ErrClosedClient { - t.Errorf("Error expected for closed client; actual value: %v", err) - } -} - -// Test recovery from ErrNotCoordinatorForConsumer -// on first fetchInitialOffset call -func 
TestOffsetManagerFetchInitialFail(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - - // Error on first fetchInitialOffset call - responseBlock := OffsetFetchResponseBlock{ - Err: ErrNotCoordinatorForConsumer, - Offset: 5, - Metadata: "test_meta", - } - - fetchResponse := new(OffsetFetchResponse) - fetchResponse.AddBlock("my_topic", 0, &responseBlock) - coordinator.Returns(fetchResponse) - - // Refresh coordinator - newCoordinator := NewMockBroker(t, 3) - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Second fetchInitialOffset call is fine - fetchResponse2 := new(OffsetFetchResponse) - responseBlock2 := responseBlock - responseBlock2.Err = ErrNoError - fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) - newCoordinator.Returns(fetchResponse2) - - pom, err := om.ManagePartition("my_topic", 0) - if err != nil { - t.Error(err) - } - - broker.Close() - coordinator.Close() - newCoordinator.Close() - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) -} - -// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress -func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - - // Error on first fetchInitialOffset call - responseBlock := OffsetFetchResponseBlock{ - Err: ErrOffsetsLoadInProgress, - Offset: 5, - Metadata: "test_meta", - } - - fetchResponse := new(OffsetFetchResponse) - fetchResponse.AddBlock("my_topic", 0, &responseBlock) - coordinator.Returns(fetchResponse) - - // Second fetchInitialOffset call is fine - fetchResponse2 := new(OffsetFetchResponse) - responseBlock2 := responseBlock - responseBlock2.Err = ErrNoError - fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) - coordinator.Returns(fetchResponse2) - - pom, err := om.ManagePartition("my_topic", 0) - if err != nil { - t.Error(err) - } - - broker.Close() 
- coordinator.Close() - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) -} - -func TestPartitionOffsetManagerInitialOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - testClient.Config().Consumer.Offsets.Initial = OffsetOldest - - // Kafka returns -1 if no offset has been stored for this partition yet. - pom := initPartitionOffsetManager(t, om, coordinator, -1, "") - - offset, meta := pom.NextOffset() - if offset != OffsetOldest { - t.Errorf("Expected offset 5. Actual: %v", offset) - } - if meta != "" { - t.Errorf("Expected metadata to be empty. Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - broker.Close() - coordinator.Close() - safeClose(t, testClient) -} - -func TestPartitionOffsetManagerNextOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta") - - offset, meta := pom.NextOffset() - if offset != 5 { - t.Errorf("Expected offset 5. Actual: %v", offset) - } - if meta != "test_meta" { - t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - broker.Close() - coordinator.Close() - safeClose(t, testClient) -} - -func TestPartitionOffsetManagerResetOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - coordinator.Returns(ocResponse) - - expected := int64(1) - pom.ResetOffset(expected, "modified_meta") - actual, meta := pom.NextOffset() - - if actual != expected { - t.Errorf("Expected offset %v. Actual: %v", expected, actual) - } - if meta != "modified_meta" { - t.Errorf("Expected metadata \"modified_meta\". 
Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) - broker.Close() - coordinator.Close() -} - -func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, time.Hour) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - handler := func(req *request) (res encoder) { - if req.body.version() != 2 { - t.Errorf("Expected to be using version 2. Actual: %v", req.body.version()) - } - offsetCommitRequest := req.body.(*OffsetCommitRequest) - if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) { - t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime) - } - return ocResponse - } - coordinator.setHandler(handler) - - expected := int64(1) - pom.ResetOffset(expected, "modified_meta") - actual, meta := pom.NextOffset() - - if actual != expected { - t.Errorf("Expected offset %v. Actual: %v", expected, actual) - } - if meta != "modified_meta" { - t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) - broker.Close() - coordinator.Close() -} - -func TestPartitionOffsetManagerMarkOffset(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - coordinator.Returns(ocResponse) - - pom.MarkOffset(100, "modified_meta") - offset, meta := pom.NextOffset() - - if offset != 100 { - t.Errorf("Expected offset 100. Actual: %v", offset) - } - if meta != "modified_meta" { - t.Errorf("Expected metadata \"modified_meta\". 
Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) - broker.Close() - coordinator.Close() -} - -func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, time.Hour) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - handler := func(req *request) (res encoder) { - if req.body.version() != 2 { - t.Errorf("Expected to be using version 2. Actual: %v", req.body.version()) - } - offsetCommitRequest := req.body.(*OffsetCommitRequest) - if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) { - t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime) - } - return ocResponse - } - coordinator.setHandler(handler) - - pom.MarkOffset(100, "modified_meta") - offset, meta := pom.NextOffset() - - if offset != 100 { - t.Errorf("Expected offset 100. Actual: %v", offset) - } - if meta != "modified_meta" { - t.Errorf("Expected metadata \"modified_meta\". 
Actual: %q", meta) - } - - safeClose(t, pom) - safeClose(t, om) - safeClose(t, testClient) - broker.Close() - coordinator.Close() -} - -func TestPartitionOffsetManagerCommitErr(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") - - // Error on one partition - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) - ocResponse.AddError("my_topic", 1, ErrNoError) - coordinator.Returns(ocResponse) - - newCoordinator := NewMockBroker(t, 3) - - // For RefreshCoordinator() - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Nothing in response.Errors at all - ocResponse2 := new(OffsetCommitResponse) - newCoordinator.Returns(ocResponse2) - - // No error, no need to refresh coordinator - - // Error on the wrong partition for this pom - ocResponse3 := new(OffsetCommitResponse) - ocResponse3.AddError("my_topic", 1, ErrNoError) - newCoordinator.Returns(ocResponse3) - - // No error, no need to refresh coordinator - - // ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block - ocResponse4 := new(OffsetCommitResponse) - ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition) - newCoordinator.Returns(ocResponse4) - - // For RefreshCoordinator() - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - // Normal error response - ocResponse5 := new(OffsetCommitResponse) - ocResponse5.AddError("my_topic", 0, ErrNoError) - newCoordinator.Returns(ocResponse5) - - pom.MarkOffset(100, "modified_meta") - - err := pom.Close() - if err != nil { - t.Error(err) - } - - broker.Close() - coordinator.Close() - newCoordinator.Close() - safeClose(t, om) - safeClose(t, testClient) -} - -// Test of 
recovery from abort -func TestAbortPartitionOffsetManager(t *testing.T) { - om, testClient, broker, coordinator := initOffsetManager(t, 0) - pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") - - // this triggers an error in the CommitOffset request, - // which leads to the abort call - coordinator.Close() - - // Response to refresh coordinator request - newCoordinator := NewMockBroker(t, 3) - broker.Returns(&ConsumerMetadataResponse{ - CoordinatorID: newCoordinator.BrokerID(), - CoordinatorHost: "127.0.0.1", - CoordinatorPort: newCoordinator.Port(), - }) - - ocResponse := new(OffsetCommitResponse) - ocResponse.AddError("my_topic", 0, ErrNoError) - newCoordinator.Returns(ocResponse) - - pom.MarkOffset(100, "modified_meta") - - safeClose(t, pom) - safeClose(t, om) - broker.Close() - safeClose(t, testClient) -} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go index 4c5df75d..c0b3305f 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -6,7 +6,7 @@ type offsetRequestBlock struct { } func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(int64(b.time)) + pe.putInt64(b.time) if version == 0 { pe.putInt32(b.maxOffsets) } @@ -27,12 +27,20 @@ func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) } type OffsetRequest struct { - Version int16 - blocks map[string]map[int32]*offsetRequestBlock + Version int16 + replicaID int32 + isReplicaIDSet bool + blocks map[string]map[int32]*offsetRequestBlock } func (r *OffsetRequest) encode(pe packetEncoder) error { - pe.putInt32(-1) // replica ID is always -1 for clients + if r.isReplicaIDSet { + pe.putInt32(r.replicaID) + } else { + // default replica ID is always -1 for clients + pe.putInt32(-1) + } + err := pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -59,10 +67,14 @@ func (r *OffsetRequest) encode(pe 
packetEncoder) error { func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { r.Version = version - // Ignore replica ID - if _, err := pd.getInt32(); err != nil { + replicaID, err := pd.getInt32() + if err != nil { return err } + if replicaID >= 0 { + r.SetReplicaID(replicaID) + } + blockCount, err := pd.getArrayLength() if err != nil { return err @@ -104,6 +116,10 @@ func (r *OffsetRequest) version() int16 { return r.Version } +func (r *OffsetRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: @@ -113,6 +129,18 @@ func (r *OffsetRequest) requiredVersion() KafkaVersion { } } +func (r *OffsetRequest) SetReplicaID(id int32) { + r.replicaID = id + r.isReplicaIDSet = true +} + +func (r *OffsetRequest) ReplicaID() int32 { + if r.isReplicaIDSet { + return r.replicaID + } + return -1 +} + func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetRequestBlock) diff --git a/vendor/github.com/Shopify/sarama/offset_request_test.go b/vendor/github.com/Shopify/sarama/offset_request_test.go deleted file mode 100644 index 9ce562c9..00000000 --- a/vendor/github.com/Shopify/sarama/offset_request_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package sarama - -import "testing" - -var ( - offsetRequestNoBlocks = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00} - - offsetRequestOneBlock = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02} - - offsetRequestOneBlockV1 = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} -) - -func TestOffsetRequest(t *testing.T) { - request := 
new(OffsetRequest) - testRequest(t, "no blocks", request, offsetRequestNoBlocks) - - request.AddBlock("foo", 4, 1, 2) - testRequest(t, "one block", request, offsetRequestOneBlock) -} - -func TestOffsetRequestV1(t *testing.T) { - request := new(OffsetRequest) - request.Version = 1 - testRequest(t, "no blocks", request, offsetRequestNoBlocks) - - request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1 - testRequest(t, "one block", request, offsetRequestOneBlockV1) -} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go index 8b2193f9..ead3ebbc 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -150,6 +150,10 @@ func (r *OffsetResponse) version() int16 { return r.Version } +func (r *OffsetResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/offset_response_test.go b/vendor/github.com/Shopify/sarama/offset_response_test.go deleted file mode 100644 index 0df6c9f3..00000000 --- a/vendor/github.com/Shopify/sarama/offset_response_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyOffsetResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} - - normalOffsetResponse = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x01, 'a', - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x01, 'z', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06} - - normalOffsetResponseV1 = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x01, 'a', - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x01, 'z', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, - 0x00, 0x00, 0x01, 0x58, 0x1A, 0xE6, 0x48, 0x86, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06} -) - -func 
TestEmptyOffsetResponse(t *testing.T) { - response := OffsetResponse{} - - testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") - } - - response = OffsetResponse{} - - testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 1) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") - } -} - -func TestNormalOffsetResponse(t *testing.T) { - response := OffsetResponse{} - - testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0) - - if len(response.Blocks) != 2 { - t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") - } - - if len(response.Blocks["a"]) != 0 { - t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") - } - - if len(response.Blocks["z"]) != 1 { - t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") - } - - if response.Blocks["z"][2].Err != ErrNoError { - t.Fatal("Decoding produced invalid error for topic z partition 2.") - } - - if len(response.Blocks["z"][2].Offsets) != 2 { - t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.") - } - - if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 { - t.Fatal("Decoding produced invalid offsets for topic z partition 2.") - } -} - -func TestNormalOffsetResponseV1(t *testing.T) { - response := OffsetResponse{} - - testVersionDecodable(t, "normal", &response, normalOffsetResponseV1, 1) - - if len(response.Blocks) != 2 { - t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") - } - - if len(response.Blocks["a"]) != 0 { - t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") - } - - if len(response.Blocks["z"]) != 1 { - 
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") - } - - if response.Blocks["z"][2].Err != ErrNoError { - t.Fatal("Decoding produced invalid error for topic z partition 2.") - } - - if response.Blocks["z"][2].Timestamp != 1477920049286 { - t.Fatal("Decoding produced invalid timestamp for topic z partition 2.", response.Blocks["z"][2].Timestamp) - } - - if response.Blocks["z"][2].Offset != 6 { - t.Fatal("Decoding produced invalid offsets for topic z partition 2.") - } -} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index 74805ccb..ed00ba35 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -10,8 +10,11 @@ type packetDecoder interface { getInt32() (int32, error) getInt64() (int64, error) getVarint() (int64, error) + getUVarint() (uint64, error) getArrayLength() (int, error) + getCompactArrayLength() (int, error) getBool() (bool, error) + getEmptyTaggedFieldArray() (int, error) // Collections getBytes() ([]byte, error) @@ -19,6 +22,9 @@ type packetDecoder interface { getRawBytes(length int) ([]byte, error) getString() (string, error) getNullableString() (*string, error) + getCompactString() (string, error) + getCompactNullableString() (*string, error) + getCompactInt32Array() ([]int32, error) getInt32Array() ([]int32, error) getInt64Array() ([]int64, error) getStringArray() ([]string, error) @@ -27,6 +33,7 @@ type packetDecoder interface { remaining() int getSubset(length int) (packetDecoder, error) peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset + peekInt8(offset int) (int8, error) // similar to peek, but just one byte // Stacks, see PushDecoder push(in pushDecoder) error diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go index 67b8daed..50c735c0 
100644 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -12,6 +12,8 @@ type packetEncoder interface { putInt32(in int32) putInt64(in int64) putVarint(in int64) + putUVarint(in uint64) + putCompactArrayLength(in int) putArrayLength(in int) error putBool(in bool) @@ -19,11 +21,16 @@ type packetEncoder interface { putBytes(in []byte) error putVarintBytes(in []byte) error putRawBytes(in []byte) error + putCompactString(in string) error + putNullableCompactString(in *string) error putString(in string) error putNullableString(in *string) error putStringArray(in []string) error + putCompactInt32Array(in []int32) error + putNullableCompactInt32Array(in []int32) error putInt32Array(in []int32) error putInt64Array(in []int64) error + putEmptyTaggedFieldArray() // Provide the current offset to record the batch size metric offset() int diff --git a/vendor/github.com/Shopify/sarama/partitioner_test.go b/vendor/github.com/Shopify/sarama/partitioner_test.go deleted file mode 100644 index f6dde020..00000000 --- a/vendor/github.com/Shopify/sarama/partitioner_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package sarama - -import ( - "crypto/rand" - "hash/fnv" - "log" - "testing" -) - -func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) { - choice, err := partitioner.Partition(message, numPartitions) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= numPartitions { - t.Error(partitioner, "returned partition", choice, "outside of range for", message) - } - for i := 1; i < 50; i++ { - newChoice, err := partitioner.Partition(message, numPartitions) - if err != nil { - t.Error(partitioner, err) - } - if newChoice != choice { - t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".") - } - } -} - -func TestRandomPartitioner(t *testing.T) { - partitioner := NewRandomPartitioner("mytopic") - - choice, 
err := partitioner.Partition(nil, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(nil, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range.") - } - } -} - -func TestRoundRobinPartitioner(t *testing.T) { - partitioner := NewRoundRobinPartitioner("mytopic") - - choice, err := partitioner.Partition(nil, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - var i int32 - for i = 1; i < 50; i++ { - choice, err := partitioner.Partition(nil, 7) - if err != nil { - t.Error(partitioner, err) - } - if choice != i%7 { - t.Error("Returned partition", choice, "expecting", i%7) - } - } -} - -func TestNewHashPartitionerWithHasher(t *testing.T) { - // use the current default hasher fnv.New32a() - partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } - } - - buf := make([]byte, 256) - for i := 1; i < 50; i++ { - if _, err := rand.Read(buf); err != nil { - t.Error(err) - } - assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) - } -} - -func TestHashPartitionerWithHasherMinInt32(t *testing.T) { - // use the current default hasher fnv.New32a() - partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic") - - msg := ProducerMessage{} - // 
"1468509572224" generates 2147483648 (uint32) result from Sum32 function - // which is -2147483648 or int32's min value - msg.Key = StringEncoder("1468509572224") - - choice, err := partitioner.Partition(&msg, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } -} - -func TestHashPartitioner(t *testing.T) { - partitioner := NewHashPartitioner("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } - } - - buf := make([]byte, 256) - for i := 1; i < 50; i++ { - if _, err := rand.Read(buf); err != nil { - t.Error(err) - } - assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) - } -} - -func TestHashPartitionerConsistency(t *testing.T) { - partitioner := NewHashPartitioner("mytopic") - ep, ok := partitioner.(DynamicConsistencyPartitioner) - - if !ok { - t.Error("Hash partitioner does not implement DynamicConsistencyPartitioner") - } - - consistency := ep.MessageRequiresConsistency(&ProducerMessage{Key: StringEncoder("hi")}) - if !consistency { - t.Error("Messages with keys should require consistency") - } - consistency = ep.MessageRequiresConsistency(&ProducerMessage{}) - if consistency { - t.Error("Messages without keys should require consistency") - } -} - -func TestHashPartitionerMinInt32(t *testing.T) { - partitioner := NewHashPartitioner("mytopic") - - msg := ProducerMessage{} - // "1468509572224" generates 2147483648 (uint32) result from Sum32 function - // which is -2147483648 or int32's min value - msg.Key = 
StringEncoder("1468509572224") - - choice, err := partitioner.Partition(&msg, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } -} - -func TestManualPartitioner(t *testing.T) { - partitioner := NewManualPartitioner("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := int32(1); i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice != i { - t.Error("Returned partition not the same as the input partition") - } - } -} - -// By default, Sarama uses the message's key to consistently assign a partition to -// a message using hashing. If no key is set, a random partition will be chosen. -// This example shows how you can partition messages randomly, even when a key is set, -// by overriding Config.Producer.Partitioner. -func ExamplePartitioner_random() { - config := NewConfig() - config.Producer.Partitioner = NewRandomPartitioner - - producer, err := NewSyncProducer([]string{"localhost:9092"}, config) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Println("Failed to close producer:", err) - } - }() - - msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")} - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Fatalln("Failed to produce message to kafka cluster.") - } - - log.Printf("Produced message to partition %d with offset %d", partition, offset) -} - -// This example shows how to assign partitions to your messages manually. -func ExamplePartitioner_manual() { - config := NewConfig() - - // First, we tell the producer that we are going to partition ourselves. 
- config.Producer.Partitioner = NewManualPartitioner - - producer, err := NewSyncProducer([]string{"localhost:9092"}, config) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Println("Failed to close producer:", err) - } - }() - - // Now, we set the Partition field of the ProducerMessage struct. - msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")} - - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Fatalln("Failed to produce message to kafka cluster.") - } - - if partition != 6 { - log.Fatal("Message should have been produced to partition 6!") - } - - log.Printf("Produced message to partition %d with offset %d", partition, offset) -} - -// This example shows how to set a different partitioner depending on the topic. -func ExamplePartitioner_per_topic() { - config := NewConfig() - config.Producer.Partitioner = func(topic string) Partitioner { - switch topic { - case "access_log", "error_log": - return NewRandomPartitioner(topic) - - default: - return NewHashPartitioner(topic) - } - } - - // ... 
-} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index b633cd15..827542c5 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "fmt" "math" @@ -36,6 +37,11 @@ func (pe *prepEncoder) putVarint(in int64) { pe.length += binary.PutVarint(buf[:], in) } +func (pe *prepEncoder) putUVarint(in uint64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutUvarint(buf[:], in) +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -44,6 +50,10 @@ func (pe *prepEncoder) putArrayLength(in int) error { return nil } +func (pe *prepEncoder) putCompactArrayLength(in int) { + pe.putUVarint(uint64(in + 1)) +} + func (pe *prepEncoder) putBool(in bool) { pe.length++ } @@ -67,6 +77,20 @@ func (pe *prepEncoder) putVarintBytes(in []byte) error { return pe.putRawBytes(in) } +func (pe *prepEncoder) putCompactString(in string) error { + pe.putCompactArrayLength(len(in)) + return pe.putRawBytes([]byte(in)) +} + +func (pe *prepEncoder) putNullableCompactString(in *string) error { + if in == nil { + pe.putUVarint(0) + return nil + } else { + return pe.putCompactString(*in) + } +} + func (pe *prepEncoder) putRawBytes(in []byte) error { if len(in) > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} @@ -107,6 +131,27 @@ func (pe *prepEncoder) putStringArray(in []string) error { return nil } +func (pe *prepEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + pe.putUVarint(0) + return nil + } + + 
pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + func (pe *prepEncoder) putInt32Array(in []int32) error { err := pe.putArrayLength(len(in)) if err != nil { @@ -125,6 +170,10 @@ func (pe *prepEncoder) putInt64Array(in []int64) error { return nil } +func (pe *prepEncoder) putEmptyTaggedFieldArray() { + pe.putUVarint(0) +} + func (pe *prepEncoder) offset() int { return pe.length } diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go index 0c755d02..0034651e 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -206,6 +206,10 @@ func (r *ProduceRequest) version() int16 { return r.Version } +func (r *ProduceRequest) headerVersion() int16 { + return 1 +} + func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: @@ -214,6 +218,8 @@ func (r *ProduceRequest) requiredVersion() KafkaVersion { return V0_10_0_0 case 3: return V0_11_0_0 + case 7: + return V2_1_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/produce_request_test.go b/vendor/github.com/Shopify/sarama/produce_request_test.go deleted file mode 100644 index b9896eb6..00000000 --- a/vendor/github.com/Shopify/sarama/produce_request_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - produceRequestEmpty = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - produceRequestHeader = []byte{ - 0x01, 0x23, - 0x00, 0x00, 0x04, 0x44, - 0x00, 0x00, 0x00, 0x00} - - produceRequestOneMessage = []byte{ - 0x01, 0x23, - 0x00, 0x00, 0x04, 0x44, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0xAD, - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, 
- 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} - - produceRequestOneRecord = []byte{ - 0xFF, 0xFF, // Transaction ID - 0x01, 0x23, // Required Acks - 0x00, 0x00, 0x04, 0x44, // Timeout - 0x00, 0x00, 0x00, 0x01, // Number of Topics - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic - 0x00, 0x00, 0x00, 0x01, // Number of Partitions - 0x00, 0x00, 0x00, 0xAD, // Partition - 0x00, 0x00, 0x00, 0x52, // Records length - // recordBatch - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x46, - 0x00, 0x00, 0x00, 0x00, - 0x02, - 0xCA, 0x33, 0xBC, 0x05, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x01, 0x58, 0x8D, 0xCD, 0x59, 0x38, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - // record - 0x28, - 0x00, - 0x0A, - 0x00, - 0x08, 0x01, 0x02, 0x03, 0x04, - 0x06, 0x05, 0x06, 0x07, - 0x02, - 0x06, 0x08, 0x09, 0x0A, - 0x04, 0x0B, 0x0C, - } -) - -func TestProduceRequest(t *testing.T) { - request := new(ProduceRequest) - testRequest(t, "empty", request, produceRequestEmpty) - - request.RequiredAcks = 0x123 - request.Timeout = 0x444 - testRequest(t, "header", request, produceRequestHeader) - - request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}}) - testRequest(t, "one message", request, produceRequestOneMessage) - - request.Version = 3 - batch := &RecordBatch{ - LastOffsetDelta: 1, - Version: 2, - FirstTimestamp: time.Unix(1479847795, 0), - MaxTimestamp: time.Unix(0, 0), - Records: []*Record{{ - TimestampDelta: 5 * time.Millisecond, - Key: []byte{0x01, 0x02, 0x03, 0x04}, - Value: []byte{0x05, 0x06, 0x07}, - Headers: []*RecordHeader{{ - Key: []byte{0x08, 0x09, 0x0A}, - Value: []byte{0x0B, 0x0C}, - }}, - }}, - } - request.AddBatch("topic", 0xAD, batch) - packet := testRequestEncode(t, "one record", request, produceRequestOneRecord) - // compressRecords field is not populated 
on decoding because consumers - // are only interested in decoded records. - batch.compressedRecords = nil - testRequestDecode(t, "one record", request, packet) -} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go index 667e34c6..edf97879 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -5,11 +5,27 @@ import ( "time" ) +// Protocol, http://kafka.apache.org/protocol.html +// v1 +// v2 = v3 = v4 +// v5 = v6 = v7 +// Produce Response (Version: 7) => [responses] throttle_time_ms +// responses => topic [partition_responses] +// topic => STRING +// partition_responses => partition error_code base_offset log_append_time log_start_offset +// partition => INT32 +// error_code => INT16 +// base_offset => INT64 +// log_append_time => INT64 +// log_start_offset => INT64 +// throttle_time_ms => INT32 + +// partition_responses in protocol type ProduceResponseBlock struct { - Err KError - Offset int64 - // only provided if Version >= 2 and the broker is configured with `LogAppendTime` - Timestamp time.Time + Err KError // v0, error_code + Offset int64 // v0, base_offset + Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime` + StartOffset int64 // v5, log_start_offset } func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -32,6 +48,13 @@ func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err erro } } + if version >= 5 { + b.StartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + return nil } @@ -49,13 +72,17 @@ func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err erro pe.putInt64(timestamp) } + if version >= 5 { + pe.putInt64(b.StartOffset) + } + return nil } type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock + Blocks map[string]map[int32]*ProduceResponseBlock // v0, 
responses Version int16 - ThrottleTime time.Duration // only provided if Version >= 1 + ThrottleTime time.Duration // v1, throttle_time_ms } func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { @@ -129,6 +156,7 @@ func (r *ProduceResponse) encode(pe packetEncoder) error { } } } + if r.Version >= 1 { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } @@ -143,17 +171,12 @@ func (r *ProduceResponse) version() int16 { return r.Version } +func (r *ProduceResponse) headerVersion() int16 { + return 0 +} + func (r *ProduceResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_11_0_0 - default: - return MinVersion - } + return MinVersion } func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { @@ -179,5 +202,11 @@ func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err K byTopic = make(map[int32]*ProduceResponseBlock) r.Blocks[topic] = byTopic } - byTopic[partition] = &ProduceResponseBlock{Err: err} + block := &ProduceResponseBlock{ + Err: err, + } + if r.Version >= 2 { + block.Timestamp = time.Now() + } + byTopic[partition] = block } diff --git a/vendor/github.com/Shopify/sarama/produce_response_test.go b/vendor/github.com/Shopify/sarama/produce_response_test.go deleted file mode 100644 index 197c7fb5..00000000 --- a/vendor/github.com/Shopify/sarama/produce_response_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" - "time" -) - -var ( - produceResponseNoBlocksV0 = []byte{ - 0x00, 0x00, 0x00, 0x00} - - produceResponseManyBlocksVersions = [][]byte{ - { - 0x00, 0x00, 0x00, 0x01, - - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - - 0x00, 0x00, 0x00, 0x01, // Partition 1 - 0x00, 0x02, // ErrInvalidMessage - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 - }, { - 0x00, 0x00, 0x00, 0x01, - - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, 
- - 0x00, 0x00, 0x00, 0x01, // Partition 1 - 0x00, 0x02, // ErrInvalidMessage - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 - - 0x00, 0x00, 0x00, 0x64, // 100 ms throttle time - }, { - 0x00, 0x00, 0x00, 0x01, - - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - - 0x00, 0x00, 0x00, 0x01, // Partition 1 - 0x00, 0x02, // ErrInvalidMessage - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Timestamp January 1st 0001 at 00:00:01,000 UTC (LogAppendTime was used) - - 0x00, 0x00, 0x00, 0x64, // 100 ms throttle time - }, - } -) - -func TestProduceResponseDecode(t *testing.T) { - response := ProduceResponse{} - - testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocksV0, 0) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none") - } - - for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions { - t.Logf("Decoding produceResponseManyBlocks version %d", v) - testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, int16(v)) - if len(response.Blocks) != 1 { - t.Error("Decoding produced", len(response.Blocks), "topics where there was 1") - } - if len(response.Blocks["foo"]) != 1 { - t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there was one") - } - block := response.GetBlock("foo", 1) - if block == nil { - t.Error("Decoding did not produce a block for foo/1") - } else { - if block.Err != ErrInvalidMessage { - t.Error("Decoding failed for foo/2/Err, got:", int16(block.Err)) - } - if block.Offset != 255 { - t.Error("Decoding failed for foo/1/Offset, got:", block.Offset) - } - if v >= 2 { - if block.Timestamp != time.Unix(1, 0) { - t.Error("Decoding failed for foo/2/Timestamp, got:", block.Timestamp) - } - } - } - if v >= 1 { - if expected := 100 * time.Millisecond; response.ThrottleTime != expected { - t.Error("Failed decoding produced 
throttle time, expected:", expected, ", got:", response.ThrottleTime) - } - } - } -} - -func TestProduceResponseEncode(t *testing.T) { - response := ProduceResponse{} - response.Blocks = make(map[string]map[int32]*ProduceResponseBlock) - testEncodable(t, "empty", &response, produceResponseNoBlocksV0) - - response.Blocks["foo"] = make(map[int32]*ProduceResponseBlock) - response.Blocks["foo"][1] = &ProduceResponseBlock{ - Err: ErrInvalidMessage, - Offset: 255, - Timestamp: time.Unix(1, 0), - } - response.ThrottleTime = 100 * time.Millisecond - for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions { - response.Version = int16(v) - testEncodable(t, fmt.Sprintf("many blocks version %d", v), &response, produceResponseManyBlocks) - } -} - -func TestProduceResponseEncodeInvalidTimestamp(t *testing.T) { - response := ProduceResponse{} - response.Version = 2 - response.Blocks = make(map[string]map[int32]*ProduceResponseBlock) - response.Blocks["t"] = make(map[int32]*ProduceResponseBlock) - response.Blocks["t"][0] = &ProduceResponseBlock{ - Err: ErrNoError, - Offset: 0, - // Use a timestamp before Unix time - Timestamp: time.Unix(0, 0).Add(-1 * time.Millisecond), - } - response.ThrottleTime = 100 * time.Millisecond - _, err := encode(&response, nil) - if err == nil { - t.Error("Expecting error, got nil") - } - if _, ok := err.(PacketEncodingError); !ok { - t.Error("Expecting PacketEncodingError, got:", err) - } -} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 13be2b3c..9c70f818 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "time" ) @@ -12,17 +13,22 @@ type partitionSet struct { } type produceSet struct { - parent *asyncProducer - msgs map[string]map[int32]*partitionSet + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + producerID 
int64 + producerEpoch int16 bufferBytes int bufferCount int } func newProduceSet(parent *asyncProducer) *produceSet { + pid, epoch := parent.txnmgr.getProducerID() return &produceSet{ - msgs: make(map[string]map[int32]*partitionSet), - parent: parent, + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + producerID: pid, + producerEpoch: epoch, } } @@ -43,9 +49,10 @@ func (ps *produceSet) add(msg *ProducerMessage) error { } timestamp := msg.Timestamp - if msg.Timestamp.IsZero() { + if timestamp.IsZero() { timestamp = time.Now() } + timestamp = timestamp.Truncate(time.Millisecond) partitions := ps.msgs[msg.Topic] if partitions == nil { @@ -61,9 +68,13 @@ func (ps *produceSet) add(msg *ProducerMessage) error { batch := &RecordBatch{ FirstTimestamp: timestamp, Version: 2, - ProducerID: -1, /* No producer id */ Codec: ps.parent.conf.Producer.Compression, CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + ProducerID: ps.producerID, + ProducerEpoch: ps.producerEpoch, + } + if ps.parent.conf.Producer.Idempotent { + batch.FirstSequence = msg.sequenceNumber } set = &partitionSet{recordsToSend: newDefaultRecords(batch)} size = recordBatchOverhead @@ -73,7 +84,15 @@ func (ps *produceSet) add(msg *ProducerMessage) error { partitions[msg.Partition] = set } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { + return errors.New("assertion failed: message out of sequence added to a batch") + } + } + + // Past this point we can't return an error, because we've already added the message to the set. 
set.msgs = append(set.msgs, msg) + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { // We are being conservative here to avoid having to prep encode the record size += maximumRecordOverhead @@ -120,8 +139,12 @@ func (ps *produceSet) buildRequest() *ProduceRequest { req.Version = 3 } - for topic, partitionSet := range ps.msgs { - for partition, set := range partitionSet { + if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + req.Version = 7 + } + + for topic, partitionSets := range ps.msgs { + for partition, set := range partitionSets { if req.Version >= 3 { // If the API version we're hitting is 3 or greater, we need to calculate // offsets for each record in the batch relative to FirstOffset. @@ -137,7 +160,6 @@ func (ps *produceSet) buildRequest() *ProduceRequest { record.OffsetDelta = int64(i) } } - req.AddBatch(topic, partition, rb) continue } @@ -183,10 +205,10 @@ func (ps *produceSet) buildRequest() *ProduceRequest { return req } -func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) { for topic, partitionSet := range ps.msgs { for partition, set := range partitionSet { - cb(topic, partition, set.msgs) + cb(topic, partition, set) } } } @@ -215,9 +237,8 @@ func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): return true - // Would we overflow the size-limit of a compressed message-batch for this partition? - case ps.parent.conf.Producer.Compression != CompressionNone && - ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + // Would we overflow the size-limit of a message-batch for this partition? 
+ case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? diff --git a/vendor/github.com/Shopify/sarama/produce_set_test.go b/vendor/github.com/Shopify/sarama/produce_set_test.go deleted file mode 100644 index 6663f36f..00000000 --- a/vendor/github.com/Shopify/sarama/produce_set_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" - "time" -) - -func makeProduceSet() (*asyncProducer, *produceSet) { - parent := &asyncProducer{ - conf: NewConfig(), - } - return parent, newProduceSet(parent) -} - -func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) { - if err := ps.add(msg); err != nil { - t.Error(err) - } -} - -func TestProduceSetInitial(t *testing.T) { - _, ps := makeProduceSet() - - if !ps.empty() { - t.Error("New produceSet should be empty") - } - - if ps.readyToFlush() { - t.Error("Empty produceSet must never be ready to flush") - } -} - -func TestProduceSetAddingMessages(t *testing.T) { - parent, ps := makeProduceSet() - parent.conf.Producer.Flush.MaxMessages = 1000 - - msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)} - safeAddMessage(t, ps, msg) - - if ps.empty() { - t.Error("set shouldn't be empty when a message is added") - } - - if !ps.readyToFlush() { - t.Error("by default set should be ready to flush when any message is in place") - } - - for i := 0; i < 999; i++ { - if ps.wouldOverflow(msg) { - t.Error("set shouldn't fill up after only", i+1, "messages") - } - safeAddMessage(t, ps, msg) - } - - if !ps.wouldOverflow(msg) { - t.Error("set should be full after 1000 messages") - } -} - -func TestProduceSetPartitionTracking(t *testing.T) { - _, ps := makeProduceSet() - - m1 := &ProducerMessage{Topic: "t1", Partition: 0} - m2 := &ProducerMessage{Topic: "t1", Partition: 1} 
- m3 := &ProducerMessage{Topic: "t2", Partition: 0} - safeAddMessage(t, ps, m1) - safeAddMessage(t, ps, m2) - safeAddMessage(t, ps, m3) - - seenT1P0 := false - seenT1P1 := false - seenT2P0 := false - - ps.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - if len(msgs) != 1 { - t.Error("Wrong message count") - } - - if topic == "t1" && partition == 0 { - seenT1P0 = true - } else if topic == "t1" && partition == 1 { - seenT1P1 = true - } else if topic == "t2" && partition == 0 { - seenT2P0 = true - } - }) - - if !seenT1P0 { - t.Error("Didn't see t1p0") - } - if !seenT1P1 { - t.Error("Didn't see t1p1") - } - if !seenT2P0 { - t.Error("Didn't see t2p0") - } - - if len(ps.dropPartition("t1", 1)) != 1 { - t.Error("Got wrong messages back from dropping partition") - } - - if ps.bufferCount != 2 { - t.Error("Incorrect buffer count after dropping partition") - } -} - -func TestProduceSetRequestBuilding(t *testing.T) { - parent, ps := makeProduceSet() - parent.conf.Producer.RequiredAcks = WaitForAll - parent.conf.Producer.Timeout = 10 * time.Second - - msg := &ProducerMessage{ - Topic: "t1", - Partition: 0, - Key: StringEncoder(TestMessage), - Value: StringEncoder(TestMessage), - } - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - msg.Partition = 1 - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - msg.Topic = "t2" - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - - req := ps.buildRequest() - - if req.RequiredAcks != WaitForAll { - t.Error("RequiredAcks not set properly") - } - - if req.Timeout != 10000 { - t.Error("Timeout not set properly") - } - - if len(req.records) != 2 { - t.Error("Wrong number of topics in request") - } -} - -func TestProduceSetCompressedRequestBuilding(t *testing.T) { - parent, ps := makeProduceSet() - parent.conf.Producer.RequiredAcks = WaitForAll - parent.conf.Producer.Timeout = 10 * time.Second - parent.conf.Producer.Compression = CompressionGZIP - parent.conf.Version = V0_10_0_0 - - 
msg := &ProducerMessage{ - Topic: "t1", - Partition: 0, - Key: StringEncoder(TestMessage), - Value: StringEncoder(TestMessage), - Timestamp: time.Now(), - } - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - } - - req := ps.buildRequest() - - if req.Version != 2 { - t.Error("Wrong request version") - } - - for _, msgBlock := range req.records["t1"][0].MsgSet.Messages { - msg := msgBlock.Msg - err := msg.decodeSet() - if err != nil { - t.Error("Failed to decode set from payload") - } - for i, compMsgBlock := range msg.Set.Messages { - compMsg := compMsgBlock.Msg - if compMsg.Version != 1 { - t.Error("Wrong compressed message version") - } - if compMsgBlock.Offset != int64(i) { - t.Errorf("Wrong relative inner offset, expected %d, got %d", i, compMsgBlock.Offset) - } - } - if msg.Version != 1 { - t.Error("Wrong compressed parent message version") - } - } -} - -func TestProduceSetV3RequestBuilding(t *testing.T) { - parent, ps := makeProduceSet() - parent.conf.Producer.RequiredAcks = WaitForAll - parent.conf.Producer.Timeout = 10 * time.Second - parent.conf.Version = V0_11_0_0 - - now := time.Now() - msg := &ProducerMessage{ - Topic: "t1", - Partition: 0, - Key: StringEncoder(TestMessage), - Value: StringEncoder(TestMessage), - Headers: []RecordHeader{ - RecordHeader{ - Key: []byte("header-1"), - Value: []byte("value-1"), - }, - RecordHeader{ - Key: []byte("header-2"), - Value: []byte("value-2"), - }, - RecordHeader{ - Key: []byte("header-3"), - Value: []byte("value-3"), - }, - }, - Timestamp: now, - } - for i := 0; i < 10; i++ { - safeAddMessage(t, ps, msg) - msg.Timestamp = msg.Timestamp.Add(time.Second) - } - - req := ps.buildRequest() - - if req.Version != 3 { - t.Error("Wrong request version") - } - - batch := req.records["t1"][0].RecordBatch - if batch.FirstTimestamp != now { - t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp) - } - for i := 0; i < 10; i++ { - rec := batch.Records[i] - if rec.TimestampDelta != time.Duration(i)*time.Second { - 
t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta) - } - - if rec.OffsetDelta != int64(i) { - t.Errorf("Wrong relative inner offset, expected %d, got %d", i, rec.OffsetDelta) - } - - for j, h := range batch.Records[i].Headers { - exp := fmt.Sprintf("header-%d", j+1) - if string(h.Key) != exp { - t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key) - } - exp = fmt.Sprintf("value-%d", j+1) - if string(h.Value) != exp { - t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value) - } - } - } -} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index 23045e7d..8ac576db 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -7,11 +7,11 @@ import ( var errInvalidArrayLength = PacketDecodingError{"invalid array length"} var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} -var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} var errInvalidStringLength = PacketDecodingError{"invalid string length"} -var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errUVarintOverflow = PacketDecodingError{"uvarint overflow"} var errInvalidBool = PacketDecodingError{"invalid bool"} +var errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"} type realDecoder struct { raw []byte @@ -75,6 +75,22 @@ func (rd *realDecoder) getVarint() (int64, error) { return tmp, nil } +func (rd *realDecoder) getUVarint() (uint64, error) { + tmp, n := binary.Uvarint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + if n < 0 { + rd.off -= n + return 0, errUVarintOverflow + } + + rd.off += n + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -91,6 +107,19 @@ 
func (rd *realDecoder) getArrayLength() (int, error) { return tmp, nil } +func (rd *realDecoder) getCompactArrayLength() (int, error) { + n, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if n == 0 { + return 0, nil + } + + return int(n) - 1, nil +} + func (rd *realDecoder) getBool() (bool, error) { b, err := rd.getInt8() if err != nil || b == 0 { @@ -102,6 +131,19 @@ func (rd *realDecoder) getBool() (bool, error) { return true, nil } +func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { + tagCount, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if tagCount != 0 { + return 0, errUnsupportedTaggedFields + } + + return 0, nil +} + // collections func (rd *realDecoder) getBytes() ([]byte, error) { @@ -169,6 +211,58 @@ func (rd *realDecoder) getNullableString() (*string, error) { return &tmpStr, err } +func (rd *realDecoder) getCompactString() (string, error) { + n, err := rd.getUVarint() + if err != nil { + return "", err + } + + var length = int(n - 1) + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return tmpStr, nil +} + +func (rd *realDecoder) getCompactNullableString() (*string, error) { + n, err := rd.getUVarint() + + if err != nil { + return nil, err + } + + var length = int(n - 1) + + if length < 0 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return &tmpStr, err +} + +func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + if n == 0 { + return nil, nil + } + + arrayLength := int(n) - 1 + + ret := make([]int32, arrayLength) + + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -290,6 +384,14 @@ func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { return 
&realDecoder{raw: rd.raw[off : off+length]}, nil } +func (rd *realDecoder) peekInt8(offset int) (int8, error) { + const byteLen = 1 + if rd.remaining() < offset+byteLen { + return -1, ErrInsufficientData + } + return int8(rd.raw[rd.off+offset]), nil +} + // stacks func (rd *realDecoder) push(in pushDecoder) error { diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index 3c75387f..ba073f7d 100644 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "github.com/rcrowley/go-metrics" ) @@ -39,11 +40,20 @@ func (re *realEncoder) putVarint(in int64) { re.off += binary.PutVarint(re.raw[re.off:], in) } +func (re *realEncoder) putUVarint(in uint64) { + re.off += binary.PutUvarint(re.raw[re.off:], in) +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil } +func (re *realEncoder) putCompactArrayLength(in int) { + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(in + 1)) +} + func (re *realEncoder) putBool(in bool) { if in { re.putInt8(1) @@ -78,6 +88,19 @@ func (re *realEncoder) putVarintBytes(in []byte) error { return re.putRawBytes(in) } +func (re *realEncoder) putCompactString(in string) error { + re.putCompactArrayLength(len(in)) + return re.putRawBytes([]byte(in)) +} + +func (re *realEncoder) putNullableCompactString(in *string) error { + if in == nil { + re.putInt8(0) + return nil + } + return re.putCompactString(*in) +} + func (re *realEncoder) putString(in string) error { re.putInt16(int16(len(in))) copy(re.raw[re.off:], in) @@ -108,6 +131,31 @@ func (re *realEncoder) putStringArray(in []string) error { return nil } +func (re *realEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + // 0 represents a null array, so +1 has to be added + 
re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + re.putUVarint(0) + return nil + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + func (re *realEncoder) putInt32Array(in []int32) error { err := re.putArrayLength(len(in)) if err != nil { @@ -130,6 +178,10 @@ func (re *realEncoder) putInt64Array(in []int64) error { return nil } +func (re *realEncoder) putEmptyTaggedFieldArray() { + re.putUVarint(0) +} + func (re *realEncoder) offset() int { return re.off } diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go index cded308c..cdccfe32 100644 --- a/vendor/github.com/Shopify/sarama/record.go +++ b/vendor/github.com/Shopify/sarama/record.go @@ -6,10 +6,12 @@ import ( ) const ( + isTransactionalMask = 0x10 controlMask = 0x20 maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 ) +//RecordHeader stores key and value for a record header type RecordHeader struct { Key []byte Value []byte @@ -33,15 +35,16 @@ func (h *RecordHeader) decode(pd packetDecoder) (err error) { return nil } +//Record is kafka record type type Record struct { + Headers []*RecordHeader + Attributes int8 TimestampDelta time.Duration OffsetDelta int64 Key []byte Value []byte - Headers []*RecordHeader - - length varintLengthField + length varintLengthField } func (r *Record) encode(pe packetEncoder) error { diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go index 845318aa..c653763e 100644 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -1,14 +1,8 @@ package sarama import ( - "bytes" - "compress/gzip" "fmt" - "io/ioutil" "time" - - 
	"github.com/eapache/go-xerial-snappy"
-	"github.com/pierrec/lz4"
 )
 
 const recordBatchOverhead = 49
@@ -42,6 +36,7 @@ type RecordBatch struct {
 	Codec                 CompressionCodec
 	CompressionLevel      int
 	Control               bool
+	LogAppendTime         bool
 	LastOffsetDelta       int32
 	FirstTimestamp        time.Time
 	MaxTimestamp          time.Time
@@ -50,11 +45,16 @@ type RecordBatch struct {
 	FirstSequence         int32
 	Records               []*Record
 	PartialTrailingRecord bool
+	IsTransactional       bool
 
 	compressedRecords []byte
 	recordsLen        int // uncompressed records size
 }
 
+func (b *RecordBatch) LastOffset() int64 {
+	return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
 func (b *RecordBatch) encode(pe packetEncoder) error {
 	if b.Version != 2 {
 		return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
@@ -116,7 +116,10 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) {
 		return err
 	}
 
-	if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+	crc32Decoder := acquireCrc32Field(crcCastagnoli)
+	defer releaseCrc32Field(crc32Decoder)
+
+	if err = pd.push(crc32Decoder); err != nil {
 		return err
 	}
 
@@ -126,6 +129,8 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) {
 	}
 	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
 	b.Control = attributes&controlMask == controlMask
+	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
 
 	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
 		return err
@@ -174,27 +179,9 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) {
 		return err
 	}
 
-	switch b.Codec {
-	case CompressionNone:
-	case CompressionGZIP:
-		reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
-		if err != nil {
-			return err
-		}
-		if recBuffer, err = ioutil.ReadAll(reader); err != nil {
-			return err
-		}
-	case CompressionSnappy:
-		if recBuffer, err = snappy.Decode(recBuffer); err != nil {
-			return err
-		}
-	case CompressionLZ4:
-		reader := lz4.NewReader(bytes.NewReader(recBuffer))
- if recBuffer, err = ioutil.ReadAll(reader); err != nil { - return err - } - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} + recBuffer, err = decompress(b.Codec, recBuffer) + if err != nil { + return err } b.recordsLen = len(recBuffer) @@ -215,44 +202,8 @@ func (b *RecordBatch) encodeRecords(pe packetEncoder) error { } b.recordsLen = len(raw) - switch b.Codec { - case CompressionNone: - b.compressedRecords = raw - case CompressionGZIP: - var buf bytes.Buffer - var writer *gzip.Writer - if b.CompressionLevel != CompressionLevelDefault { - writer, err = gzip.NewWriterLevel(&buf, b.CompressionLevel) - if err != nil { - return err - } - } else { - writer = gzip.NewWriter(&buf) - } - if _, err := writer.Write(raw); err != nil { - return err - } - if err := writer.Close(); err != nil { - return err - } - b.compressedRecords = buf.Bytes() - case CompressionSnappy: - b.compressedRecords = snappy.Encode(raw) - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err := writer.Write(raw); err != nil { - return err - } - if err := writer.Close(); err != nil { - return err - } - b.compressedRecords = buf.Bytes() - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} - } - - return nil + b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw) + return err } func (b *RecordBatch) computeAttributes() int16 { @@ -260,6 +211,12 @@ func (b *RecordBatch) computeAttributes() int16 { if b.Control { attr |= controlMask } + if b.LogAppendTime { + attr |= timestampTypeMask + } + if b.IsTransactional { + attr |= isTransactionalMask + } return attr } diff --git a/vendor/github.com/Shopify/sarama/record_test.go b/vendor/github.com/Shopify/sarama/record_test.go deleted file mode 100644 index 2756c5b2..00000000 --- a/vendor/github.com/Shopify/sarama/record_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package sarama - -import ( - "reflect" - "runtime" - 
"strconv" - "strings" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" -) - -var recordBatchTestCases = []struct { - name string - batch RecordBatch - encoded []byte - oldGoEncoded []byte // used in case of gzipped content for go versions prior to 1.8 -}{ - { - name: "empty record", - batch: RecordBatch{ - Version: 2, - FirstTimestamp: time.Unix(0, 0), - MaxTimestamp: time.Unix(0, 0), - Records: []*Record{}, - }, - encoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 49, // Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 89, 95, 183, 221, // CRC - 0, 0, // Attributes - 0, 0, 0, 0, // Last Offset Delta - 0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 0, // Number of Records - }, - }, - { - name: "control batch", - batch: RecordBatch{ - Version: 2, - Control: true, - FirstTimestamp: time.Unix(0, 0), - MaxTimestamp: time.Unix(0, 0), - Records: []*Record{}, - }, - encoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 49, // Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 81, 46, 67, 217, // CRC - 0, 32, // Attributes - 0, 0, 0, 0, // Last Offset Delta - 0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 0, // Number of Records - }, - }, - { - name: "uncompressed record", - batch: RecordBatch{ - Version: 2, - FirstTimestamp: time.Unix(1479847795, 0), - MaxTimestamp: time.Unix(0, 0), - LastOffsetDelta: 0, - Records: []*Record{{ - TimestampDelta: 5 * time.Millisecond, - Key: []byte{1, 2, 3, 4}, - Value: []byte{5, 6, 7}, - Headers: []*RecordHeader{{ - Key: []byte{8, 9, 10}, - Value: []byte{11, 12}, - }}, - }}, - recordsLen: 21, - }, - encoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 70, 
// Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 84, 121, 97, 253, // CRC - 0, 0, // Attributes - 0, 0, 0, 0, // Last Offset Delta - 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 1, // Number of Records - 40, // Record Length - 0, // Attributes - 10, // Timestamp Delta - 0, // Offset Delta - 8, // Key Length - 1, 2, 3, 4, - 6, // Value Length - 5, 6, 7, - 2, // Number of Headers - 6, // Header Key Length - 8, 9, 10, // Header Key - 4, // Header Value Length - 11, 12, // Header Value - }, - }, - { - name: "gzipped record", - batch: RecordBatch{ - Version: 2, - Codec: CompressionGZIP, - CompressionLevel: CompressionLevelDefault, - FirstTimestamp: time.Unix(1479847795, 0), - MaxTimestamp: time.Unix(0, 0), - LastOffsetDelta: 0, - Records: []*Record{{ - TimestampDelta: 5 * time.Millisecond, - Key: []byte{1, 2, 3, 4}, - Value: []byte{5, 6, 7}, - Headers: []*RecordHeader{{ - Key: []byte{8, 9, 10}, - Value: []byte{11, 12}, - }}, - }}, - recordsLen: 21, - }, - encoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 94, // Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 159, 236, 182, 189, // CRC - 0, 1, // Attributes - 0, 0, 0, 0, // Last Offset Delta - 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 1, // Number of Records - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101, - 99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0, - }, - oldGoEncoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 94, // Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 0, 216, 14, 210, // CRC - 0, 1, // Attributes - 0, 0, 0, 0, 
// Last Offset Delta - 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 1, // Number of Records - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101, - 99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0, - }, - }, - { - name: "snappy compressed record", - batch: RecordBatch{ - Version: 2, - Codec: CompressionSnappy, - FirstTimestamp: time.Unix(1479847795, 0), - MaxTimestamp: time.Unix(0, 0), - LastOffsetDelta: 0, - Records: []*Record{{ - TimestampDelta: 5 * time.Millisecond, - Key: []byte{1, 2, 3, 4}, - Value: []byte{5, 6, 7}, - Headers: []*RecordHeader{{ - Key: []byte{8, 9, 10}, - Value: []byte{11, 12}, - }}, - }}, - recordsLen: 21, - }, - encoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 72, // Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 21, 0, 159, 97, // CRC - 0, 2, // Attributes - 0, 0, 0, 0, // Last Offset Delta - 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 1, // Number of Records - 21, 80, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12, - }, - }, - { - name: "lz4 compressed record", - batch: RecordBatch{ - Version: 2, - Codec: CompressionLZ4, - FirstTimestamp: time.Unix(1479847795, 0), - MaxTimestamp: time.Unix(0, 0), - LastOffsetDelta: 0, - Records: []*Record{{ - TimestampDelta: 5 * time.Millisecond, - Key: []byte{1, 2, 3, 4}, - Value: []byte{5, 6, 7}, - Headers: []*RecordHeader{{ - Key: []byte{8, 9, 10}, - Value: []byte{11, 12}, - }}, - }}, - recordsLen: 21, - }, - encoded: []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, // First Offset - 0, 0, 0, 89, // Length - 0, 0, 0, 0, // Partition Leader Epoch - 2, // Version - 
169, 74, 119, 197, // CRC - 0, 3, // Attributes - 0, 0, 0, 0, // Last Offset Delta - 0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp - 0, 0, 0, 0, 0, 0, 0, 0, // Producer ID - 0, 0, // Producer Epoch - 0, 0, 0, 0, // First Sequence - 0, 0, 0, 1, // Number of Records - 4, 34, 77, 24, 100, 112, 185, 21, 0, 0, 128, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, - 6, 8, 9, 10, 4, 11, 12, 0, 0, 0, 0, 12, 59, 239, 146, - }, - }, -} - -func isOldGo(t *testing.T) bool { - v := strings.Split(runtime.Version()[2:], ".") - if len(v) < 2 { - t.Logf("Can't parse version: %s", runtime.Version()) - return false - } - maj, err := strconv.Atoi(v[0]) - if err != nil { - t.Logf("Can't parse version: %s", runtime.Version()) - return false - } - min, err := strconv.Atoi(v[1]) - if err != nil { - t.Logf("Can't parse version: %s", runtime.Version()) - return false - } - return maj < 1 || (maj == 1 && min < 8) -} - -func TestRecordBatchEncoding(t *testing.T) { - for _, tc := range recordBatchTestCases { - if tc.oldGoEncoded != nil && isOldGo(t) { - testEncodable(t, tc.name, &tc.batch, tc.oldGoEncoded) - } else { - testEncodable(t, tc.name, &tc.batch, tc.encoded) - } - } -} - -func TestRecordBatchDecoding(t *testing.T) { - for _, tc := range recordBatchTestCases { - batch := RecordBatch{} - testDecodable(t, tc.name, &batch, tc.encoded) - for _, r := range batch.Records { - r.length = varintLengthField{} - } - for _, r := range tc.batch.Records { - r.length = varintLengthField{} - } - // The compression level is not restored on decoding. It is not needed - // anyway. We only set it here to ensure that comparision succeeds. 
- batch.CompressionLevel = tc.batch.CompressionLevel - if !reflect.DeepEqual(batch, tc.batch) { - t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch)) - } - } -} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go index 192f5927..f4c5e95f 100644 --- a/vendor/github.com/Shopify/sarama/records.go +++ b/vendor/github.com/Shopify/sarama/records.go @@ -8,7 +8,6 @@ const ( defaultRecords magicOffset = 16 - magicLength = 1 ) // Records implements a union type containing either a RecordBatch or a legacy MessageSet. @@ -185,10 +184,20 @@ func (r *Records) isOverflow() (bool, error) { } func magicValue(pd packetDecoder) (int8, error) { - dec, err := pd.peek(magicOffset, magicLength) + return pd.peekInt8(magicOffset) +} + +func (r *Records) getControlRecord() (ControlRecord, error) { + if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") + } + + firstRecord := r.RecordBatch.Records[0] + controlRecord := ControlRecord{} + err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value}) if err != nil { - return 0, err + return ControlRecord{}, err } - return dec.getInt8() + return controlRecord, nil } diff --git a/vendor/github.com/Shopify/sarama/records_test.go b/vendor/github.com/Shopify/sarama/records_test.go deleted file mode 100644 index cd6aa278..00000000 --- a/vendor/github.com/Shopify/sarama/records_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package sarama - -import ( - "bytes" - "reflect" - "testing" -) - -func TestLegacyRecords(t *testing.T) { - set := &MessageSet{ - Messages: []*MessageBlock{ - { - Msg: &Message{ - Version: 1, - }, - }, - }, - } - r := newLegacyRecords(set) - - exp, err := encode(set, nil) - if err != nil { - t.Fatal(err) - } - buf, err := encode(&r, nil) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(buf, exp) { - 
t.Errorf("Wrong encoding for legacy records, wanted %v, got %v", exp, buf) - } - - set = &MessageSet{} - r = Records{} - - err = decode(exp, set) - if err != nil { - t.Fatal(err) - } - err = decode(buf, &r) - if err != nil { - t.Fatal(err) - } - - if r.recordsType != legacyRecords { - t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords) - } - if !reflect.DeepEqual(set, r.MsgSet) { - t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.MsgSet) - } - - n, err := r.numRecords() - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Errorf("Wrong number of records, wanted 1, got %d", n) - } - - p, err := r.isPartial() - if err != nil { - t.Fatal(err) - } - if p { - t.Errorf("MessageSet shouldn't have a partial trailing message") - } - - c, err := r.isControl() - if err != nil { - t.Fatal(err) - } - if c { - t.Errorf("MessageSet can't be a control batch") - } -} - -func TestDefaultRecords(t *testing.T) { - batch := &RecordBatch{ - Version: 2, - Records: []*Record{ - { - Value: []byte{1}, - }, - }, - } - - r := newDefaultRecords(batch) - - exp, err := encode(batch, nil) - if err != nil { - t.Fatal(err) - } - buf, err := encode(&r, nil) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(buf, exp) { - t.Errorf("Wrong encoding for default records, wanted %v, got %v", exp, buf) - } - - batch = &RecordBatch{} - r = Records{} - - err = decode(exp, batch) - if err != nil { - t.Fatal(err) - } - err = decode(buf, &r) - if err != nil { - t.Fatal(err) - } - - if r.recordsType != defaultRecords { - t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords) - } - if !reflect.DeepEqual(batch, r.RecordBatch) { - t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.RecordBatch) - } - - n, err := r.numRecords() - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Errorf("Wrong number of records, wanted 1, got %d", n) - } - - p, err := r.isPartial() - if err != nil { - t.Fatal(err) - } - if p 
{ - t.Errorf("RecordBatch shouldn't have a partial trailing record") - } - - c, err := r.isControl() - if err != nil { - t.Fatal(err) - } - if c { - t.Errorf("RecordBatch shouldn't be a control batch") - } -} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index 4d211a14..dcfd3946 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -11,6 +11,7 @@ type protocolBody interface { versionedDecoder key() int16 version() int16 + headerVersion() int16 requiredVersion() KafkaVersion } @@ -20,51 +21,82 @@ type request struct { body protocolBody } -func (r *request) encode(pe packetEncoder) (err error) { +func (r *request) encode(pe packetEncoder) error { pe.push(&lengthField{}) pe.putInt16(r.body.key()) pe.putInt16(r.body.version()) pe.putInt32(r.correlationID) - err = pe.putString(r.clientID) - if err != nil { - return err + + if r.body.headerVersion() >= 1 { + err := pe.putString(r.clientID) + if err != nil { + return err + } } - err = r.body.encode(pe) + + if r.body.headerVersion() >= 2 { + // we don't use tag headers at the moment so we just put an array length of 0 + pe.putUVarint(0) + } + + err := r.body.encode(pe) if err != nil { return err } + return pe.pop() } func (r *request) decode(pd packetDecoder) (err error) { - var key int16 - if key, err = pd.getInt16(); err != nil { + key, err := pd.getInt16() + if err != nil { return err } - var version int16 - if version, err = pd.getInt16(); err != nil { + + version, err := pd.getInt16() + if err != nil { return err } - if r.correlationID, err = pd.getInt32(); err != nil { + + r.correlationID, err = pd.getInt32() + if err != nil { return err } + r.clientID, err = pd.getString() + if err != nil { + return err + } r.body = allocateBody(key, version) if r.body == nil { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } + + if r.body.headerVersion() >= 2 { + // tagged field + _, err = 
pd.getUVarint() + if err != nil { + return err + } + } + return r.body.decode(pd, version) } -func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { - lengthBytes := make([]byte, 4) +func decodeRequest(r io.Reader) (*request, int, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + if _, err := io.ReadFull(r, lengthBytes); err != nil { return nil, bytesRead, err } - bytesRead += len(lengthBytes) + bytesRead += len(lengthBytes) length := int32(binary.BigEndian.Uint32(lengthBytes)) + if length <= 4 || length > MaxRequestSize { return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} } @@ -73,12 +105,14 @@ func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { if _, err := io.ReadFull(r, encodedReq); err != nil { return nil, bytesRead, err } + bytesRead += len(encodedReq) - req = &request{} + req := &request{} if err := decode(encodedReq, req); err != nil { return nil, bytesRead, err } + return req, bytesRead, nil } @@ -87,7 +121,7 @@ func allocateBody(key, version int16) protocolBody { case 0: return &ProduceRequest{} case 1: - return &FetchRequest{} + return &FetchRequest{Version: version} case 2: return &OffsetRequest{Version: version} case 3: @@ -140,10 +174,18 @@ func allocateBody(key, version int16) protocolBody { return &DescribeConfigsRequest{} case 33: return &AlterConfigsRequest{} + case 35: + return &DescribeLogDirsRequest{} + case 36: + return &SaslAuthenticateRequest{} case 37: return &CreatePartitionsRequest{} case 42: return &DeleteGroupsRequest{} + case 45: + return &AlterPartitionReassignmentsRequest{} + case 46: + return &ListPartitionReassignmentsRequest{} } return nil } diff --git a/vendor/github.com/Shopify/sarama/request_test.go b/vendor/github.com/Shopify/sarama/request_test.go deleted file mode 100644 index ffc2bb7a..00000000 --- a/vendor/github.com/Shopify/sarama/request_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package sarama - 
-import ( - "bytes" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -type testRequestBody struct { -} - -func (s *testRequestBody) key() int16 { - return 0x666 -} - -func (s *testRequestBody) version() int16 { - return 0xD2 -} - -func (s *testRequestBody) encode(pe packetEncoder) error { - return pe.putString("abc") -} - -// not specific to request tests, just helper functions for testing structures that -// implement the encoder or decoder interfaces that needed somewhere to live - -func testEncodable(t *testing.T, name string, in encoder, expect []byte) { - packet, err := encode(in, nil) - if err != nil { - t.Error(err) - } else if !bytes.Equal(packet, expect) { - t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect) - } -} - -func testDecodable(t *testing.T, name string, out decoder, in []byte) { - err := decode(in, out) - if err != nil { - t.Error("Decoding", name, "failed:", err) - } -} - -func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) { - err := versionedDecode(in, out, version) - if err != nil { - t.Error("Decoding", name, "version", version, "failed:", err) - } -} - -func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { - if !rb.requiredVersion().IsAtLeast(MinVersion) { - t.Errorf("Request %s has invalid required version", name) - } - packet := testRequestEncode(t, name, rb, expected) - testRequestDecode(t, name, rb, packet) -} - -func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte { - req := &request{correlationID: 123, clientID: "foo", body: rb} - packet, err := encode(req, nil) - headerSize := 14 + len("foo") - if err != nil { - t.Error(err) - } else if !bytes.Equal(packet[headerSize:], expected) { - t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected) - } - return packet -} - -func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) { - decoded, n, 
err := decodeRequest(bytes.NewReader(packet)) - if err != nil { - t.Error("Failed to decode request", err) - } else if decoded.correlationID != 123 || decoded.clientID != "foo" { - t.Errorf("Decoded header %q is not valid: %+v", name, decoded) - } else if !reflect.DeepEqual(rb, decoded.body) { - t.Error(spew.Sprintf("Decoded request %q does not match the encoded one\nencoded: %+v\ndecoded: %+v", name, rb, decoded.body)) - } else if n != len(packet) { - t.Errorf("Decoded request %q bytes: %d does not match the encoded one: %d\n", name, n, len(packet)) - } else if rb.version() != decoded.body.version() { - t.Errorf("Decoded request %q version: %d does not match the encoded one: %d\n", name, decoded.body.version(), rb.version()) - } -} - -func testResponse(t *testing.T, name string, res protocolBody, expected []byte) { - encoded, err := encode(res, nil) - if err != nil { - t.Error(err) - } else if expected != nil && !bytes.Equal(encoded, expected) { - t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected) - } - - decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder) - if err := versionedDecode(encoded, decoded, res.version()); err != nil { - t.Error("Decoding", name, "failed:", err) - } - - if !reflect.DeepEqual(decoded, res) { - t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded) - } -} - -func nullString(s string) *string { return &s } diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go index f3f4d27d..5dffb75b 100644 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -2,12 +2,15 @@ package sarama import "fmt" +const responseLengthSize = 4 +const correlationIDSize = 4 + type responseHeader struct { length int32 correlationID int32 } -func (r *responseHeader) decode(pd packetDecoder) (err error) { +func (r *responseHeader) decode(pd 
packetDecoder, version int16) (err error) { r.length, err = pd.getInt32() if err != nil { return err @@ -17,5 +20,12 @@ func (r *responseHeader) decode(pd packetDecoder) (err error) { } r.correlationID, err = pd.getInt32() + + if version >= 1 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return err } diff --git a/vendor/github.com/Shopify/sarama/response_header_test.go b/vendor/github.com/Shopify/sarama/response_header_test.go deleted file mode 100644 index 8f9fdb80..00000000 --- a/vendor/github.com/Shopify/sarama/response_header_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "testing" - -var ( - responseHeaderBytes = []byte{ - 0x00, 0x00, 0x0f, 0x00, - 0x0a, 0xbb, 0xcc, 0xff} -) - -func TestResponseHeader(t *testing.T) { - header := responseHeader{} - - testDecodable(t, "response header", &header, responseHeaderBytes) - if header.length != 0xf00 { - t.Error("Decoding header length failed, got", header.length) - } - if header.correlationID != 0x0abbccff { - t.Error("Decoding header correlation id failed, got", header.correlationID) - } -} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index 7d5dc60d..48f362d2 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -10,10 +10,7 @@ useful but comes with two caveats: it will generally be less efficient, and the depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. -To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic -consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the -https://github.com/wvanbergen/kafka library builds on Sarama to add this support. 
For Kafka-based tracking (Kafka 0.9 -and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. +To consume messages, use Consumer or Consumer-Group API. For lower-level needs, the Broker and Request/Response objects permit precise control over each connection and message sent on the wire; the Client provides higher-level metadata management that is shared between @@ -42,6 +39,10 @@ Broker related metrics: | response-rate-for-broker- | meter | Responses/second received from a given broker | | response-size | histogram | Distribution of the response size in bytes for all brokers | | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | +----------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. 
@@ -61,6 +62,14 @@ Producer related metrics: | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ +Consumer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + */ package sarama @@ -69,10 +78,29 @@ import ( "log" ) -// Logger is the instance of a StdLogger interface that Sarama writes connection -// management events to. By default it is set to discard all log messages via ioutil.Discard, -// but you can set it to redirect wherever you want. -var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) +var ( + // Logger is the instance of a StdLogger interface that Sarama writes connection + // management events to. By default it is set to discard all log messages via ioutil.Discard, + // but you can set it to redirect wherever you want. + Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + + // PanicHandler is called for recovering from panics spawned internally to the library (and thus + // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. + PanicHandler func(interface{}) + + // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. 
Trying + // to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned + // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt + // to process. + MaxRequestSize int32 = 100 * 1024 * 1024 + + // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If + // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to + // protect the client from running out of memory. Please note that brokers do not have any natural limit on + // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers + // (see https://issues.apache.org/jira/browse/KAFKA-2063). + MaxResponseSize int32 = 100 * 1024 * 1024 +) // StdLogger is used to log error messages. type StdLogger interface { @@ -80,20 +108,3 @@ type StdLogger interface { Printf(format string, v ...interface{}) Println(v ...interface{}) } - -// PanicHandler is called for recovering from panics spawned internally to the library (and thus -// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. -var PanicHandler func(interface{}) - -// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying -// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned -// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt -// to process. -var MaxRequestSize int32 = 100 * 1024 * 1024 - -// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If -// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to -// protect the client from running out of memory. 
Please note that brokers do not have any natural limit on -// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers -// (see https://issues.apache.org/jira/browse/KAFKA-2063). -var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go new file mode 100644 index 00000000..90504df6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslAuthenticateRequest struct { + SaslAuthBytes []byte +} + +// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API +const APIKeySASLAuth = 36 + +func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.SaslAuthBytes, err = pd.getBytes() + return err +} + +func (r *SaslAuthenticateRequest) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateRequest) version() int16 { + return 0 +} + +func (r *SaslAuthenticateRequest) headerVersion() int16 { + return 1 +} + +func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go new file mode 100644 index 00000000..3ef57b5a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -0,0 +1,48 @@ +package sarama + +type SaslAuthenticateResponse struct { + Err KError + ErrorMessage *string + SaslAuthBytes []byte +} + +func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putNullableString(r.ErrorMessage); err != nil { + return err + } + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateResponse) decode(pd 
packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.ErrorMessage, err = pd.getNullableString(); err != nil { + return err + } + + r.SaslAuthBytes, err = pd.getBytes() + + return err +} + +func (r *SaslAuthenticateResponse) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateResponse) version() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) headerVersion() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go index fbbc8947..74dc3072 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -2,6 +2,7 @@ package sarama type SaslHandshakeRequest struct { Mechanism string + Version int16 } func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { @@ -25,7 +26,11 @@ func (r *SaslHandshakeRequest) key() int16 { } func (r *SaslHandshakeRequest) version() int16 { - return 0 + return r.Version +} + +func (r *SaslHandshakeRequest) headerVersion() int16 { + return 1 } func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go deleted file mode 100644 index 806e628f..00000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package sarama - -import "testing" - -var ( - baseSaslRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Mechanism - } -) - -func TestSaslHandshakeRequest(t *testing.T) { - var request *SaslHandshakeRequest - - request = new(SaslHandshakeRequest) - request.Mechanism = "foo" - testRequest(t, "basic", request, baseSaslRequest) -} diff --git 
a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go index ef290d4b..69dfc317 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -33,6 +33,10 @@ func (r *SaslHandshakeResponse) version() int16 { return 0 } +func (r *SaslHandshakeResponse) headerVersion() int16 { + return 0 +} + func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go deleted file mode 100644 index 1fd4c79e..00000000 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -import "testing" - -var ( - saslHandshakeResponse = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, 'f', 'o', 'o', - } -) - -func TestSaslHandshakeResponse(t *testing.T) { - var response *SaslHandshakeResponse - - response = new(SaslHandshakeResponse) - testVersionDecodable(t, "no error", response, saslHandshakeResponse, 0) - if response.Err != ErrNoError { - t.Error("Decoding error failed: no error expected but found", response.Err) - } - if response.EnabledMechanisms[0] != "foo" { - t.Error("Decoding error failed: expected 'foo' but found", response.EnabledMechanisms) - } -} diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go new file mode 100644 index 00000000..bb0c82c3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go @@ -0,0 +1,124 @@ +package sarama + +type topicPartitionAssignment struct { + Topic string + Partition int32 +} + +type StickyAssignorUserData interface { + partitions() []topicPartitionAssignment + hasGeneration() bool + generation() int +} + +//StickyAssignorUserDataV0 holds topic partition 
information for an assignment +type StickyAssignorUserDataV0 struct { + Topics map[string][]int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false } +func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration } + +//StickyAssignorUserDataV1 holds topic partition information for an assignment +type StickyAssignorUserDataV1 struct { + Topics map[string][]int32 + Generation int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + pe.putInt32(m.Generation) + return nil +} + +func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + 
+ m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + m.Generation, err = pd.getInt32() + if err != nil { + return err + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true } +func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) } + +func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment { + topicPartitions := make([]topicPartitionAssignment, 0) + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition}) + } + } + return topicPartitions +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go index fe207080..ac6ecb13 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -77,6 +77,10 @@ func (r *SyncGroupRequest) version() int16 { return 0 } +func (r *SyncGroupRequest) headerVersion() int16 { + return 1 +} + func (r *SyncGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_group_request_test.go b/vendor/github.com/Shopify/sarama/sync_group_request_test.go deleted file mode 100644 index 3f537ef9..00000000 --- a/vendor/github.com/Shopify/sarama/sync_group_request_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package sarama - -import "testing" - -var ( - emptySyncGroupRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Group ID - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 3, 'b', 'a', 'z', // Member ID - 0, 0, 
0, 0, // no assignments - } - - populatedSyncGroupRequest = []byte{ - 0, 3, 'f', 'o', 'o', // Group ID - 0x00, 0x01, 0x02, 0x03, // Generation ID - 0, 3, 'b', 'a', 'z', // Member ID - 0, 0, 0, 1, // one assignment - 0, 3, 'b', 'a', 'z', // Member ID - 0, 0, 0, 3, 'f', 'o', 'o', // Member assignment - } -) - -func TestSyncGroupRequest(t *testing.T) { - var request *SyncGroupRequest - - request = new(SyncGroupRequest) - request.GroupId = "foo" - request.GenerationId = 66051 - request.MemberId = "baz" - testRequest(t, "empty", request, emptySyncGroupRequest) - - request = new(SyncGroupRequest) - request.GroupId = "foo" - request.GenerationId = 66051 - request.MemberId = "baz" - request.AddGroupAssignment("baz", []byte("foo")) - testRequest(t, "populated", request, populatedSyncGroupRequest) -} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go index 194b382b..af019c42 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -36,6 +36,10 @@ func (r *SyncGroupResponse) version() int16 { return 0 } +func (r *SyncGroupResponse) headerVersion() int16 { + return 0 +} + func (r *SyncGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_group_response_test.go b/vendor/github.com/Shopify/sarama/sync_group_response_test.go deleted file mode 100644 index 6fb70885..00000000 --- a/vendor/github.com/Shopify/sarama/sync_group_response_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package sarama - -import ( - "reflect" - "testing" -) - -var ( - syncGroupResponseNoError = []byte{ - 0x00, 0x00, // No error - 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data - } - - syncGroupResponseWithError = []byte{ - 0, 27, // ErrRebalanceInProgress - 0, 0, 0, 0, // No member assignment data - } -) - -func TestSyncGroupResponse(t *testing.T) { - var response *SyncGroupResponse - - 
response = new(SyncGroupResponse) - testVersionDecodable(t, "no error", response, syncGroupResponseNoError, 0) - if response.Err != ErrNoError { - t.Error("Decoding Err failed: no error expected but found", response.Err) - } - if !reflect.DeepEqual(response.MemberAssignment, []byte{0x01, 0x02, 0x03}) { - t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment) - } - - response = new(SyncGroupResponse) - testVersionDecodable(t, "no error", response, syncGroupResponseWithError, 0) - if response.Err != ErrRebalanceInProgress { - t.Error("Decoding Err failed: ErrRebalanceInProgress expected but found", response.Err) - } - if !reflect.DeepEqual(response.MemberAssignment, []byte{}) { - t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment) - } -} diff --git a/vendor/github.com/Shopify/sarama/sync_producer_test.go b/vendor/github.com/Shopify/sarama/sync_producer_test.go deleted file mode 100644 index cb97548d..00000000 --- a/vendor/github.com/Shopify/sarama/sync_producer_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package sarama - -import ( - "log" - "sync" - "testing" -) - -func TestSyncProducer(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - for i := 0; i < 10; i++ { - leader.Returns(prodSuccess) - } - - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - msg := &ProducerMessage{ - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - } - - partition, offset, err := producer.SendMessage(msg) - - if partition != 0 || msg.Partition != partition { - 
t.Error("Unexpected partition") - } - if offset != 0 || msg.Offset != offset { - t.Error("Unexpected offset") - } - if str, ok := msg.Metadata.(string); !ok || str != "test" { - t.Error("Unexpected metadata") - } - if err != nil { - t.Error(err) - } - } - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestSyncProducerBatch(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 3 - config.Producer.Return.Successes = true - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - err = producer.SendMessages([]*ProducerMessage{ - { - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - }, - { - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - }, - { - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - }, - }) - - if err != nil { - t.Error(err) - } - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestConcurrentSyncProducer(t *testing.T) { - seedBroker := NewMockBroker(t, 1) - leader := NewMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 100 
- config.Producer.Return.Successes = true - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - wg := sync.WaitGroup{} - - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)} - partition, _, err := producer.SendMessage(msg) - if partition != 0 { - t.Error("Unexpected partition") - } - if err != nil { - t.Error(err) - } - wg.Done() - }() - } - wg.Wait() - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestSyncProducerToNonExistingTopic(t *testing.T) { - broker := NewMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError) - broker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - config.Producer.Retry.Max = 0 - config.Producer.Return.Successes = true - - producer, err := NewSyncProducer([]string{broker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - broker.Returns(metadataResponse) - - _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"}) - if err != ErrUnknownTopicOrPartition { - t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err) - } - - safeClose(t, producer) - broker.Close() -} - -// This example shows the basic usage pattern of the SyncProducer. 
-func ExampleSyncProducer() { - producer, err := NewSyncProducer([]string{"localhost:9092"}, nil) - if err != nil { - log.Fatalln(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Fatalln(err) - } - }() - - msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Printf("FAILED to send message: %s\n", err) - } else { - log.Printf("> message sent to partition %d at offset %d\n", partition, offset) - } -} diff --git a/vendor/github.com/Shopify/sarama/tools/README.md b/vendor/github.com/Shopify/sarama/tools/README.md deleted file mode 100644 index 3464c4ad..00000000 --- a/vendor/github.com/Shopify/sarama/tools/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Sarama tools - -This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation. -Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function. - -- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka custer. -- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster. -- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster. 
- -To install all tools, run `go get github.com/Shopify/sarama/tools/...` diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore deleted file mode 100644 index 67da9dfa..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-consumer -kafka-console-consumer.test diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md deleted file mode 100644 index 4e77f0b7..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# kafka-console-consumer - -A simple command line tool to consume partitions of a topic and print the -messages on the standard output. - -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-consumer - -### Usage - - # Minimum invocation - kafka-console-consumer -topic=test -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-consumer -topic=test - - # You can specify the offset you want to start at. It can be either - # `oldest`, `newest`. The default is `newest`. - kafka-console-consumer -topic=test -offset=oldest - kafka-console-consumer -topic=test -offset=newest - - # You can specify the partition(s) you want to consume as a comma-separated - # list. The default is `all`. 
- kafka-console-consumer -topic=test -partitions=1,2,3 - - # Display all command line options - kafka-console-consumer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go deleted file mode 100644 index 0f1eb89a..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go +++ /dev/null @@ -1,145 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "strings" - "sync" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") - topic = flag.String("topic", "", "REQUIRED: the topic to consume") - partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") - offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`") - verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") - bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") - } - - if *topic == "" { - printUsageErrorAndExit("-topic is required") - } - - if *verbose { - sarama.Logger = logger - } - - var initialOffset int64 - switch *offset { - case "oldest": - initialOffset = sarama.OffsetOldest - case "newest": - initialOffset = sarama.OffsetNewest - default: - printUsageErrorAndExit("-offset should be `oldest` or `newest`") - } - - c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) - if err != nil { - printErrorAndExit(69, "Failed to start consumer: %s", err) - } - - partitionList, err := getPartitions(c) - if err != nil { - printErrorAndExit(69, "Failed to get the list of partitions: %s", err) - } - - var ( - messages = make(chan *sarama.ConsumerMessage, *bufferSize) - closing = make(chan struct{}) - wg sync.WaitGroup - ) - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Kill, os.Interrupt) - <-signals - logger.Println("Initiating shutdown of consumer...") - close(closing) - }() - - for _, partition := range partitionList { - pc, err := c.ConsumePartition(*topic, partition, initialOffset) - if err != nil { - printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) - } - - go func(pc sarama.PartitionConsumer) { - <-closing - pc.AsyncClose() - }(pc) - - wg.Add(1) - go func(pc sarama.PartitionConsumer) { - defer wg.Done() - for message := range pc.Messages() { - messages <- message - } - }(pc) - } - - go func() { - for msg := range messages { - fmt.Printf("Partition:\t%d\n", msg.Partition) - fmt.Printf("Offset:\t%d\n", msg.Offset) - 
fmt.Printf("Key:\t%s\n", string(msg.Key)) - fmt.Printf("Value:\t%s\n", string(msg.Value)) - fmt.Println() - } - }() - - wg.Wait() - logger.Println("Done consuming topic", *topic) - close(messages) - - if err := c.Close(); err != nil { - logger.Println("Failed to close consumer: ", err) - } -} - -func getPartitions(c sarama.Consumer) ([]int32, error) { - if *partitions == "all" { - return c.Partitions(*topic) - } - - tmp := strings.Split(*partitions, ",") - var pList []int32 - for i := range tmp { - val, err := strconv.ParseInt(tmp[i], 10, 32) - if err != nil { - return nil, err - } - pList = append(pList, int32(val)) - } - - return pList, nil -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore deleted file mode 100644 index 5837fe8c..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-partitionconsumer -kafka-console-partitionconsumer.test diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md deleted file mode 100644 index 646dd5f5..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# kafka-console-partitionconsumer - -NOTE: this tool is deprecated in favour of the more general and more powerful 
-`kafka-console-consumer`. - -A simple command line tool to consume a partition of a topic and print the messages -on the standard output. - -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer - -### Usage - - # Minimum invocation - kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-partitionconsumer -topic=test -partition=4 - - # You can specify the offset you want to start at. It can be either - # `oldest`, `newest`, or a specific offset number - kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest - kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337 - - # Display all command line options - kafka-console-partitionconsumer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go deleted file mode 100644 index d5e4464d..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go +++ /dev/null @@ -1,102 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "strings" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") - topic = flag.String("topic", "", "REQUIRED: the topic to consume") - partition = flag.Int("partition", -1, "REQUIRED: the partition to consume") - offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`, or an actual offset") - verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") - } - - if *topic == "" { - printUsageErrorAndExit("-topic is required") - } - - if *partition == -1 { - printUsageErrorAndExit("-partition is required") - } - - if *verbose { - sarama.Logger = logger - } - - var ( - initialOffset int64 - offsetError error - ) - switch *offset { - case "oldest": - initialOffset = sarama.OffsetOldest - case "newest": - initialOffset = sarama.OffsetNewest - default: - initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64) - } - - if offsetError != nil { - printUsageErrorAndExit("Invalid initial offset: %s", *offset) - } - - c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) - if err != nil { - printErrorAndExit(69, "Failed to start consumer: %s", err) - } - - pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset) - if err != nil { - printErrorAndExit(69, "Failed to start partition consumer: %s", err) - } - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Kill, os.Interrupt) - <-signals - pc.AsyncClose() - }() - - for msg := range pc.Messages() { - fmt.Printf("Offset:\t%d\n", msg.Offset) - fmt.Printf("Key:\t%s\n", string(msg.Key)) - fmt.Printf("Value:\t%s\n", string(msg.Value)) - fmt.Println() - } - - if err := c.Close(); err != nil { - logger.Println("Failed to close consumer: ", err) - } -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", 
fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore deleted file mode 100644 index 2b9e563a..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-producer -kafka-console-producer.test diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md deleted file mode 100644 index 6b3a65f2..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# kafka-console-producer - -A simple command line tool to produce a single message to Kafka. - -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-producer - - -### Usage - - # Minimum invocation - kafka-console-producer -topic=test -value=value -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-producer -topic=test -value=value - - # It will read the value from stdin by using pipes - echo "hello world" | kafka-console-producer -topic=test - - # Specify a key: - echo "hello world" | kafka-console-producer -topic=test -key=key - - # Partitioning: by default, kafka-console-producer will partition as follows: - # - manual partitioning if a -partition is provided - # - hash partitioning by key if a -key is provided - # - random partioning otherwise. 
- # - # You can override this using the -partitioner argument: - echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random - - # Display all command line options - kafka-console-producer -help diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go deleted file mode 100644 index 83054ed7..00000000 --- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go +++ /dev/null @@ -1,124 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/Shopify/sarama" - "github.com/rcrowley/go-metrics" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable") - topic = flag.String("topic", "", "REQUIRED: the topic to produce to") - key = flag.String("key", "", "The key of the message to produce. Can be empty.") - value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.") - partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`") - partition = flag.Int("partition", -1, "The partition to produce to.") - verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr") - showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr") - silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("no -brokers specified. 
Alternatively, set the KAFKA_PEERS environment variable") - } - - if *topic == "" { - printUsageErrorAndExit("no -topic specified") - } - - if *verbose { - sarama.Logger = logger - } - - config := sarama.NewConfig() - config.Producer.RequiredAcks = sarama.WaitForAll - config.Producer.Return.Successes = true - - switch *partitioner { - case "": - if *partition >= 0 { - config.Producer.Partitioner = sarama.NewManualPartitioner - } else { - config.Producer.Partitioner = sarama.NewHashPartitioner - } - case "hash": - config.Producer.Partitioner = sarama.NewHashPartitioner - case "random": - config.Producer.Partitioner = sarama.NewRandomPartitioner - case "manual": - config.Producer.Partitioner = sarama.NewManualPartitioner - if *partition == -1 { - printUsageErrorAndExit("-partition is required when partitioning manually") - } - default: - printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner)) - } - - message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)} - - if *key != "" { - message.Key = sarama.StringEncoder(*key) - } - - if *value != "" { - message.Value = sarama.StringEncoder(*value) - } else if stdinAvailable() { - bytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - printErrorAndExit(66, "Failed to read data from the standard input: %s", err) - } - message.Value = sarama.ByteEncoder(bytes) - } else { - printUsageErrorAndExit("-value is required, or you have to provide the value on stdin") - } - - producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config) - if err != nil { - printErrorAndExit(69, "Failed to open Kafka producer: %s", err) - } - defer func() { - if err := producer.Close(); err != nil { - logger.Println("Failed to close Kafka producer cleanly:", err) - } - }() - - partition, offset, err := producer.SendMessage(message) - if err != nil { - printErrorAndExit(69, "Failed to produce message: %s", err) - } else if !*silent { - 
fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset) - } - if *showMetrics { - metrics.WriteOnce(config.MetricRegistry, os.Stderr) - } -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(message string) { - fmt.Fprintln(os.Stderr, "ERROR:", message) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} - -func stdinAvailable() bool { - stat, _ := os.Stdin.Stat() - return (stat.Mode() & os.ModeCharDevice) == 0 -} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go index 71e95b81..c4043a33 100644 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -91,6 +91,10 @@ func (a *TxnOffsetCommitRequest) version() int16 { return 0 } +func (a *TxnOffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request_test.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request_test.go deleted file mode 100644 index fe3bba54..00000000 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package sarama - -import "testing" - -var ( - txnOffsetCommitRequest = []byte{ - 0, 3, 't', 'x', 'n', - 0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd', - 0, 0, 0, 0, 0, 0, 31, 64, // producer ID - 0, 1, // producer epoch - 0, 0, 0, 1, // 1 topic - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 1, // 1 partition - 0, 0, 0, 2, // partition no 2 - 0, 0, 0, 0, 0, 0, 0, 123, - 255, 255, // no meta data - } -) - -func TestTxnOffsetCommitRequest(t *testing.T) { - req := 
&TxnOffsetCommitRequest{ - TransactionalID: "txn", - GroupID: "groupid", - ProducerID: 8000, - ProducerEpoch: 1, - Topics: map[string][]*PartitionOffsetMetadata{ - "topic": []*PartitionOffsetMetadata{{ - Offset: 123, - Partition: 2, - }}, - }, - } - - testRequest(t, "", req, txnOffsetCommitRequest) -} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go index 6c980f40..94d8029d 100644 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -78,6 +78,10 @@ func (a *TxnOffsetCommitResponse) version() int16 { return 0 } +func (a *TxnOffsetCommitResponse) headerVersion() int16 { + return 0 +} + func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response_test.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response_test.go deleted file mode 100644 index b275265c..00000000 --- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package sarama - -import ( - "testing" - "time" -) - -var ( - txnOffsetCommitResponse = []byte{ - 0, 0, 0, 100, - 0, 0, 0, 1, // 1 topic - 0, 5, 't', 'o', 'p', 'i', 'c', - 0, 0, 0, 1, // 1 partition response - 0, 0, 0, 2, // partition number 2 - 0, 47, // err - } -) - -func TestTxnOffsetCommitResponse(t *testing.T) { - resp := &TxnOffsetCommitResponse{ - ThrottleTime: 100 * time.Millisecond, - Topics: map[string][]*PartitionError{ - "topic": []*PartitionError{{ - Partition: 2, - Err: ErrInvalidProducerEpoch, - }}, - }, - } - - testResponse(t, "", resp, txnOffsetCommitResponse) -} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 1bb00d76..de4d7a10 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -26,9 +26,7 @@ 
func (slice int32Slice) Swap(i, j int) { func dupInt32Slice(input []int32) []int32 { ret := make([]int32, 0, len(input)) - for _, val := range input { - ret = append(ret, val) - } + ret = append(ret, input...) return ret } @@ -155,7 +153,16 @@ var ( V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) + V1_1_1_0 = newKafkaVersion(1, 1, 1, 0) V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) + V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) + V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) + V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) + V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) + V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) + V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) + V2_6_0_0 = newKafkaVersion(2, 6, 0, 0) + V2_7_0_0 = newKafkaVersion(2, 7, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -174,15 +181,26 @@ var ( V0_11_0_2, V1_0_0_0, V1_1_0_0, + V1_1_1_0, V2_0_0_0, + V2_0_1_0, + V2_1_0_0, + V2_2_0_0, + V2_3_0_0, + V2_4_0_0, + V2_5_0_0, + V2_6_0_0, + V2_7_0_0, } - MinVersion = V0_8_2_0 - MaxVersion = V2_0_0_0 + MinVersion = V0_8_2_0 + MaxVersion = V2_7_0_0 + DefaultVersion = V1_0_0_0 ) +//ParseKafkaVersion parses and returns kafka version or error from a string func ParseKafkaVersion(s string) (KafkaVersion, error) { if len(s) < 5 { - return MinVersion, fmt.Errorf("invalid version `%s`", s) + return DefaultVersion, fmt.Errorf("invalid version `%s`", s) } var major, minor, veryMinor, patch uint var err error @@ -192,7 +210,7 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) { err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) } if err != nil { - return MinVersion, err + return DefaultVersion, err } return newKafkaVersion(major, minor, veryMinor, patch), nil } @@ -208,7 +226,7 @@ func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error func (v KafkaVersion) String() string { if v.version[0] == 0 { return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], 
v.version[3]) - } else { - return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) } + + return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) } diff --git a/vendor/github.com/Shopify/sarama/utils_test.go b/vendor/github.com/Shopify/sarama/utils_test.go deleted file mode 100644 index 013620e5..00000000 --- a/vendor/github.com/Shopify/sarama/utils_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -import "testing" - -func TestVersionCompare(t *testing.T) { - if V0_8_2_0.IsAtLeast(V0_8_2_1) { - t.Error("0.8.2.0 >= 0.8.2.1") - } - if !V0_8_2_1.IsAtLeast(V0_8_2_0) { - t.Error("! 0.8.2.1 >= 0.8.2.0") - } - if !V0_8_2_0.IsAtLeast(V0_8_2_0) { - t.Error("! 0.8.2.0 >= 0.8.2.0") - } - if !V0_9_0_0.IsAtLeast(V0_8_2_1) { - t.Error("! 0.9.0.0 >= 0.8.2.1") - } - if V0_8_2_1.IsAtLeast(V0_10_0_0) { - t.Error("0.8.2.1 >= 0.10.0.0") - } -} - -func TestVersionParsing(t *testing.T) { - validVersions := []string{"0.8.2.0", "0.8.2.1", "0.9.0.0", "0.10.2.0", "1.0.0"} - for _, s := range validVersions { - v, err := ParseKafkaVersion(s) - if err != nil { - t.Errorf("could not parse valid version %s: %s", s, err) - } - if v.String() != s { - t.Errorf("version %s != %s", v.String(), s) - } - } - - invalidVersions := []string{"0.8.2-4", "0.8.20", "1.19.0.0", "1.0.x"} - for _, s := range invalidVersions { - if _, err := ParseKafkaVersion(s); err == nil { - t.Errorf("invalid version %s parsed without error", s) - } - } -} diff --git a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh deleted file mode 100755 index 95e47dde..00000000 --- a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -# Launch and wait for toxiproxy -${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh & -while ! 
nc -q 1 localhost 2181 ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid -done diff --git a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf deleted file mode 100644 index 25101df5..00000000 --- a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf +++ /dev/null @@ -1,9 +0,0 @@ -start on started zookeeper-ZK_PORT -stop on stopping zookeeper-ZK_PORT - -# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper) -script - sleep 2 - export KAFKA_HEAP_OPTS="-Xmx320m" - exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties -end script diff --git a/vendor/github.com/Shopify/sarama/vagrant/provision.sh b/vendor/github.com/Shopify/sarama/vagrant/provision.sh deleted file mode 100755 index 13a8d562..00000000 --- a/vendor/github.com/Shopify/sarama/vagrant/provision.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -ex - -apt-get update -yes | apt-get install default-jre - -export KAFKA_INSTALL_ROOT=/opt -export KAFKA_HOSTNAME=192.168.100.67 -export KAFKA_VERSION=1.0.0 -export REPOSITORY_ROOT=/vagrant - -sh /vagrant/vagrant/install_cluster.sh -sh /vagrant/vagrant/setup_services.sh -sh /vagrant/vagrant/create_topics.sh diff --git a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh deleted file mode 100755 index e52c00e7..00000000 --- a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 & -PID=$! - -while ! 
nc -q 1 localhost 8474 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma seperated list of directories under which to store log files -log.dirs=KAFKA_DATADIR - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=2 - -# Create new topics with a replication factor of 2 so failover can be tested -# more easily. -default.replication.factor=2 - -auto.create.topics.enable=false -delete.topic.enable=true - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
- -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -log.retention.bytes=268435456 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=268435456 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=60000 - -# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires. -# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction. -log.cleaner.enable=false - -############################# Zookeeper ############################# - -# Zookeeper connection string (see zookeeper docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. 
-zookeeper.connect=localhost:ZK_PORT - -# Timeout in ms for connecting to zookeeper -zookeeper.session.timeout.ms=3000 -zookeeper.connection.timeout.ms=3000 diff --git a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh deleted file mode 100755 index 81d8ea05..00000000 --- a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -set -ex - -stop toxiproxy || true -cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf -cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/ -start toxiproxy - -for i in 1 2 3 4 5; do - ZK_PORT=`expr $i + 2180` - KAFKA_PORT=`expr $i + 9090` - - stop zookeeper-${ZK_PORT} || true - - # set up zk service - cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf - sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf - - # set up kafka service - cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf - sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf - sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf - - start zookeeper-${ZK_PORT} -done - -# Wait for the last kafka node to finish booting -while ! 
nc -q 1 localhost 29095 ", v.j) -} - -type W struct { - k int -} - -func (w *W) Error() string { - if w == nil { - return "nilW" - } - return fmt.Sprintf("[%d]", w.k) -} - -var tVal = &T{ - True: true, - I: 17, - U16: 16, - X: "x", - U: &U{"v"}, - V0: V{6666}, - V1: &V{7777}, // leave V2 as nil - W0: W{888}, - W1: &W{999}, // leave W2 as nil - SI: []int{3, 4, 5}, - SB: []bool{true, false}, - MSI: map[string]int{"one": 1, "two": 2, "three": 3}, - MSIone: map[string]int{"one": 1}, - MXI: map[interface{}]int{"one": 1}, - MII: map[int]int{1: 1}, - SMSI: []map[string]int{ - {"one": 1, "two": 2}, - {"eleven": 11, "twelve": 12}, - }, - Empty1: 3, - Empty2: "empty2", - Empty3: []int{7, 8}, - Empty4: &U{"UinEmpty"}, - NonEmptyInterface: new(T), - Str: bytes.NewBuffer([]byte("foozle")), - Err: errors.New("erroozle"), - PI: newInt(23), - PS: newString("a string"), - PSI: newIntSlice(21, 22, 23), - BinaryFunc: func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) }, - VariadicFunc: func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") }, - VariadicFuncInt: func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") }, - NilOKFunc: func(s *int) bool { return s == nil }, - ErrFunc: func() (string, error) { return "bla", nil }, - Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X -} - -// A non-empty interface. -type I interface { - Method0() string -} - -var iVal I = tVal - -// Helpers for creation. -func newInt(n int) *int { - return &n -} - -func newString(s string) *string { - return &s -} - -func newIntSlice(n ...int) *[]int { - p := new([]int) - *p = make([]int, len(n)) - copy(*p, n) - return p -} - -// Simple methods with and without arguments. 
-func (t *T) Method0() string { - return "M0" -} - -func (t *T) Method1(a int) int { - return a -} - -func (t *T) Method2(a uint16, b string) string { - return fmt.Sprintf("Method2: %d %s", a, b) -} - -func (t *T) Method3(v interface{}) string { - return fmt.Sprintf("Method3: %v", v) -} - -func (t *T) Copy() *T { - n := new(T) - *n = *t - return n -} - -func (t *T) MAdd(a int, b []int) []int { - v := make([]int, len(b)) - for i, x := range b { - v[i] = x + a - } - return v -} - -var myError = errors.New("my error") - -// MyError returns a value and an error according to its argument. -func (t *T) MyError(error bool) (bool, error) { - if error { - return true, myError - } - return false, nil -} - -// A few methods to test chaining. -func (t *T) GetU() *U { - return t.U -} - -func (u *U) TrueFalse(b bool) string { - if b { - return "true" - } - return "" -} - -func typeOf(arg interface{}) string { - return fmt.Sprintf("%T", arg) -} - -type execTest struct { - name string - input string - output string - data interface{} - ok bool -} - -// bigInt and bigUint are hex string representing numbers either side -// of the max int boundary. -// We do it this way so the test doesn't depend on ints being 32 bits. -var ( - bigInt = fmt.Sprintf("0x%x", int(1<", tVal, true}, - {"map .one interface", "{{.MXI.one}}", "1", tVal, true}, - {"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false}, - {"map .WRONG type", "{{.MII.one}}", "", tVal, false}, - - // Dots of all kinds to test basic evaluation. 
- {"dot int", "<{{.}}>", "<13>", 13, true}, - {"dot uint", "<{{.}}>", "<14>", uint(14), true}, - {"dot float", "<{{.}}>", "<15.1>", 15.1, true}, - {"dot bool", "<{{.}}>", "", true, true}, - {"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true}, - {"dot string", "<{{.}}>", "", "hello", true}, - {"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true}, - {"dot map", "<{{.}}>", "", map[string]int{"two": 22}, true}, - {"dot struct", "<{{.}}>", "<{7 seven}>", struct { - a int - b string - }{7, "seven"}, true}, - - // Variables. - {"$ int", "{{$}}", "123", 123, true}, - {"$.I", "{{$.I}}", "17", tVal, true}, - {"$.U.V", "{{$.U.V}}", "v", tVal, true}, - {"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true}, - - // Type with String method. - {"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true}, - {"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true}, - {"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true}, - - // Type with Error method. - {"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true}, - {"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true}, - {"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true}, - - // Pointers. - {"*int", "{{.PI}}", "23", tVal, true}, - {"*string", "{{.PS}}", "a string", tVal, true}, - {"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true}, - {"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true}, - {"NIL", "{{.NIL}}", "", tVal, true}, - - // Empty interfaces holding values. - {"empty nil", "{{.Empty0}}", "", tVal, true}, - {"empty with int", "{{.Empty1}}", "3", tVal, true}, - {"empty with string", "{{.Empty2}}", "empty2", tVal, true}, - {"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true}, - {"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true}, - {"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true}, - - // Method calls. 
- {".Method0", "-{{.Method0}}-", "-M0-", tVal, true}, - {".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true}, - {".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true}, - {".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true}, - {".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true}, - {".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true}, - {".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: -", tVal, true}, - {".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: -", tVal, true}, - {"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true}, - {"method on chained var", - "{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}", - "true", tVal, true}, - {"chained method", - "{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}", - "true", tVal, true}, - {"chained method on variable", - "{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}", - "true", tVal, true}, - {".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true}, - {".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true}, - - // Function call builtin. 
- {".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true}, - {".VariadicFunc0", "{{call .VariadicFunc}}", "<>", tVal, true}, - {".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "", tVal, true}, - {".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=", tVal, true}, - {"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true}, - {"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true}, - {"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true}, - {".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true}, - - // Erroneous function calls (check args). - {".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false}, - {".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false}, - {".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false}, - {".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false}, - {".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false}, - {".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false}, - {".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false}, - {".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false}, - - // Pipelines. 
- {"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true}, - {"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "->-", tVal, true}, - - // Parenthesized expressions - {"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true}, - - // Parenthesized expressions with field accesses - {"parens: $ in paren", "{{($).X}}", "x", tVal, true}, - {"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true}, - {"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true}, - {"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true}, - - // If. - {"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true}, - {"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true}, - {"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false}, - {"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true}, - {"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true}, - {"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true}, - {"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true}, - {"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true}, - {"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true}, - {"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"if map not unset", "{{if not 
.MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true}, - {"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true}, - {"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true}, - {"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true}, - {"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true}, - - // Print etc. - {"print", `{{print "hello, print"}}`, "hello, print", tVal, true}, - {"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true}, - {"print nil", `{{print nil}}`, "", tVal, true}, - {"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true}, - {"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true}, - {"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true}, - {"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true}, - {"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true}, - {"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true}, - {"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true}, - {"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true}, - {"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true}, - {"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true}, - {"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true}, - - // HTML. - {"html", `{{html ""}}`, - "<script>alert("XSS");</script>", nil, true}, - {"html pipeline", `{{printf "" | html}}`, - "<script>alert("XSS");</script>", nil, true}, - {"html", `{{html .PS}}`, "a string", tVal, true}, - - // JavaScript. - {"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true}, - - // URL query. 
- {"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true}, - - // Booleans - {"not", "{{not true}} {{not false}}", "false true", nil, true}, - {"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true}, - {"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true}, - {"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true}, - {"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true}, - - // Indexing. - {"slice[0]", "{{index .SI 0}}", "3", tVal, true}, - {"slice[1]", "{{index .SI 1}}", "4", tVal, true}, - {"slice[HUGE]", "{{index .SI 10}}", "", tVal, false}, - {"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false}, - {"map[one]", "{{index .MSI `one`}}", "1", tVal, true}, - {"map[two]", "{{index .MSI `two`}}", "2", tVal, true}, - {"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true}, - {"map[nil]", "{{index .MSI nil}}", "0", tVal, true}, - {"map[WRONG]", "{{index .MSI 10}}", "", tVal, false}, - {"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true}, - - // Len. - {"slice", "{{len .SI}}", "3", tVal, true}, - {"map", "{{len .MSI }}", "3", tVal, true}, - {"len of int", "{{len 3}}", "", tVal, false}, - {"len of nothing", "{{len .Empty0}}", "", tVal, false}, - - // With. 
- {"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true}, - {"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true}, - {"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true}, - {"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true}, - {"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true}, - {"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true}, - {"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true}, - {"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true}, - {"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true}, - {"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true}, - {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true}, - {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true}, - - // Range. 
- {"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true}, - {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true}, - {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true}, - {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true}, - {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true}, - {"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true}, - {"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true}, - {"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true}, - {"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true}, - {"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true}, - {"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true}, - {"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true}, - {"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true}, - {"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "", tVal, true}, - {"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true}, - {"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true}, - {"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true}, - {"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true}, - - // Cute examples. 
- {"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true}, - {"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true}, - - // Error handling. - {"error method, error", "{{.MyError true}}", "", tVal, false}, - {"error method, no error", "{{.MyError false}}", "false", tVal, true}, - - // Fixed bugs. - // Must separate dot and receiver; otherwise args are evaluated with dot set to variable. - {"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true}, - // Do not loop endlessly in indirect for non-empty interfaces. - // The bug appears with *interface only; looped forever. - {"bug1", "{{.Method0}}", "M0", &iVal, true}, - // Was taking address of interface field, so method set was empty. - {"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true}, - // Struct values were not legal in with - mere oversight. - {"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true}, - // Nil interface values in if. - {"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true}, - // Stringer. - {"bug5", "{{.Str}}", "foozle", tVal, true}, - {"bug5a", "{{.Err}}", "erroozle", tVal, true}, - // Args need to be indirected and dereferenced sometimes. - {"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true}, - {"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true}, - {"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true}, - {"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true}, - // Legal parse but illegal execution: non-function should have no arguments. - {"bug7a", "{{3 2}}", "", tVal, false}, - {"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false}, - {"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false}, - // Pipelined arg was not being type-checked. - {"bug8a", "{{3|oneArg}}", "", tVal, false}, - {"bug8b", "{{4|dddArg 3}}", "", tVal, false}, - // A bug was introduced that broke map lookups for lower-case names. 
- {"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true}, - // Field chain starting with function did not work. - {"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true}, - // Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333. - {"bug11", "{{valueString .PS}}", "", T{}, false}, - // 0xef gave constant type float64. Issue 8622. - {"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true}, - {"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true}, - {"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true}, - {"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true}, - // Chained nodes did not work as arguments. Issue 8473. - {"bug13", "{{print (.Copy).I}}", "17", tVal, true}, -} - -func zeroArgs() string { - return "zeroArgs" -} - -func oneArg(a string) string { - return "oneArg=" + a -} - -func dddArg(a int, b ...string) string { - return fmt.Sprintln(a, b) -} - -// count returns a channel that will deliver n sequential 1-letter strings starting at "a" -func count(n int) chan string { - if n == 0 { - return nil - } - c := make(chan string) - go func() { - for i := 0; i < n; i++ { - c <- "abcdefghijklmnop"[i : i+1] - } - close(c) - }() - return c -} - -// vfunc takes a *V and a V -func vfunc(V, *V) string { - return "vfunc" -} - -// valueString takes a string, not a pointer. 
-func valueString(v string) string { - return "value is ignored" -} - -func add(args ...int) int { - sum := 0 - for _, x := range args { - sum += x - } - return sum -} - -func echo(arg interface{}) interface{} { - return arg -} - -func makemap(arg ...string) map[string]string { - if len(arg)%2 != 0 { - panic("bad makemap") - } - m := make(map[string]string) - for i := 0; i < len(arg); i += 2 { - m[arg[i]] = arg[i+1] - } - return m -} - -func stringer(s fmt.Stringer) string { - return s.String() -} - -func mapOfThree() interface{} { - return map[string]int{"three": 3} -} - -func testExecute(execTests []execTest, template *Template, t *testing.T) { - b := new(bytes.Buffer) - funcs := FuncMap{ - "add": add, - "count": count, - "dddArg": dddArg, - "echo": echo, - "makemap": makemap, - "mapOfThree": mapOfThree, - "oneArg": oneArg, - "stringer": stringer, - "typeOf": typeOf, - "valueString": valueString, - "vfunc": vfunc, - "zeroArgs": zeroArgs, - } - for _, test := range execTests { - var tmpl *Template - var err error - if template == nil { - tmpl, err = New(test.name).Funcs(funcs).Parse(test.input) - } else { - tmpl, err = template.New(test.name).Funcs(funcs).Parse(test.input) - } - if err != nil { - t.Errorf("%s: parse error: %s", test.name, err) - continue - } - b.Reset() - err = tmpl.Execute(b, test.data) - switch { - case !test.ok && err == nil: - t.Errorf("%s: expected error; got none", test.name) - continue - case test.ok && err != nil: - t.Errorf("%s: unexpected execute error: %s", test.name, err) - continue - case !test.ok && err != nil: - // expected error, got one - if *debug { - fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) - } - } - result := b.String() - if result != test.output { - t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result) - } - } -} - -func TestExecute(t *testing.T) { - testExecute(execTests, nil, t) -} - -var delimPairs = []string{ - "", "", // default - "{{", "}}", // same as default - "<<", ">>", // distinct 
- "|", "|", // same - "(æ—¥)", "(本)", // peculiar -} - -func TestDelims(t *testing.T) { - const hello = "Hello, world" - var value = struct{ Str string }{hello} - for i := 0; i < len(delimPairs); i += 2 { - text := ".Str" - left := delimPairs[i+0] - trueLeft := left - right := delimPairs[i+1] - trueRight := right - if left == "" { // default case - trueLeft = "{{" - } - if right == "" { // default case - trueRight = "}}" - } - text = trueLeft + text + trueRight - // Now add a comment - text += trueLeft + "/*comment*/" + trueRight - // Now add an action containing a string. - text += trueLeft + `"` + trueLeft + `"` + trueRight - // At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`. - tmpl, err := New("delims").Delims(left, right).Parse(text) - if err != nil { - t.Fatalf("delim %q text %q parse err %s", left, text, err) - } - var b = new(bytes.Buffer) - err = tmpl.Execute(b, value) - if err != nil { - t.Fatalf("delim %q exec err %s", left, err) - } - if b.String() != hello+trueLeft { - t.Errorf("expected %q got %q", hello+trueLeft, b.String()) - } - } -} - -// Check that an error from a method flows back to the top. -func TestExecuteError(t *testing.T) { - b := new(bytes.Buffer) - tmpl := New("error") - _, err := tmpl.Parse("{{.MyError true}}") - if err != nil { - t.Fatalf("parse error: %s", err) - } - err = tmpl.Execute(b, tVal) - if err == nil { - t.Errorf("expected error; got none") - } else if !strings.Contains(err.Error(), myError.Error()) { - if *debug { - fmt.Printf("test execute error: %s\n", err) - } - t.Errorf("expected myError; got %s", err) - } -} - -const execErrorText = `line 1 -line 2 -line 3 -{{template "one" .}} -{{define "one"}}{{template "two" .}}{{end}} -{{define "two"}}{{template "three" .}}{{end}} -{{define "three"}}{{index "hi" $}}{{end}}` - -// Check that an error from a nested template contains all the relevant information. 
-func TestExecError(t *testing.T) { - tmpl, err := New("top").Parse(execErrorText) - if err != nil { - t.Fatal("parse error:", err) - } - var b bytes.Buffer - err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi" - if err == nil { - t.Fatal("expected error") - } - const want = `template: top:7:20: executing "three" at : error calling index: index out of range: 5` - got := err.Error() - if got != want { - t.Errorf("expected\n%q\ngot\n%q", want, got) - } -} - -func TestJSEscaping(t *testing.T) { - testCases := []struct { - in, exp string - }{ - {`a`, `a`}, - {`'foo`, `\'foo`}, - {`Go "jump" \`, `Go \"jump\" \\`}, - {`Yukihiro says "今日ã¯ä¸–ç•Œ"`, `Yukihiro says \"今日ã¯ä¸–ç•Œ\"`}, - {"unprintable \uFDFF", `unprintable \uFDFF`}, - {``, `\x3Chtml\x3E`}, - } - for _, tc := range testCases { - s := JSEscapeString(tc.in) - if s != tc.exp { - t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp) - } - } -} - -// A nice example: walk a binary tree. - -type Tree struct { - Val int - Left, Right *Tree -} - -// Use different delimiters to test Set.Delims. -const treeTemplate = ` - (define "tree") - [ - (.Val) - (with .Left) - (template "tree" .) - (end) - (with .Right) - (template "tree" .) - (end) - ] - (end) -` - -func TestTree(t *testing.T) { - var tree = &Tree{ - 1, - &Tree{ - 2, &Tree{ - 3, - &Tree{ - 4, nil, nil, - }, - nil, - }, - &Tree{ - 5, - &Tree{ - 6, nil, nil, - }, - nil, - }, - }, - &Tree{ - 7, - &Tree{ - 8, - &Tree{ - 9, nil, nil, - }, - nil, - }, - &Tree{ - 10, - &Tree{ - 11, nil, nil, - }, - nil, - }, - }, - } - tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate) - if err != nil { - t.Fatal("parse error:", err) - } - var b bytes.Buffer - stripSpace := func(r rune) rune { - if r == '\t' || r == '\n' { - return -1 - } - return r - } - const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]" - // First by looking up the template. 
- err = tmpl.Lookup("tree").Execute(&b, tree) - if err != nil { - t.Fatal("exec error:", err) - } - result := strings.Map(stripSpace, b.String()) - if result != expect { - t.Errorf("expected %q got %q", expect, result) - } - // Then direct to execution. - b.Reset() - err = tmpl.ExecuteTemplate(&b, "tree", tree) - if err != nil { - t.Fatal("exec error:", err) - } - result = strings.Map(stripSpace, b.String()) - if result != expect { - t.Errorf("expected %q got %q", expect, result) - } -} - -func TestExecuteOnNewTemplate(t *testing.T) { - // This is issue 3872. - _ = New("Name").Templates() -} - -const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}` - -func TestMessageForExecuteEmpty(t *testing.T) { - // Test a truly empty template. - tmpl := New("empty") - var b bytes.Buffer - err := tmpl.Execute(&b, 0) - if err == nil { - t.Fatal("expected initial error") - } - got := err.Error() - want := `template: empty: "empty" is an incomplete or empty template` - if got != want { - t.Errorf("expected error %s got %s", want, got) - } - // Add a non-empty template to check that the error is helpful. - tests, err := New("").Parse(testTemplates) - if err != nil { - t.Fatal(err) - } - tmpl.AddParseTree("secondary", tests.Tree) - err = tmpl.Execute(&b, 0) - if err == nil { - t.Fatal("expected second error") - } - got = err.Error() - want = `template: empty: "empty" is an incomplete or empty template; defined templates are: "secondary"` - if got != want { - t.Errorf("expected error %s got %s", want, got) - } - // Make sure we can execute the secondary. 
- err = tmpl.ExecuteTemplate(&b, "secondary", 0) - if err != nil { - t.Fatal(err) - } -} - -func TestFinalForPrintf(t *testing.T) { - tmpl, err := New("").Parse(`{{"x" | printf}}`) - if err != nil { - t.Fatal(err) - } - var b bytes.Buffer - err = tmpl.Execute(&b, 0) - if err != nil { - t.Fatal(err) - } -} - -type cmpTest struct { - expr string - truth string - ok bool -} - -var cmpTests = []cmpTest{ - {"eq true true", "true", true}, - {"eq true false", "false", true}, - {"eq 1+2i 1+2i", "true", true}, - {"eq 1+2i 1+3i", "false", true}, - {"eq 1.5 1.5", "true", true}, - {"eq 1.5 2.5", "false", true}, - {"eq 1 1", "true", true}, - {"eq 1 2", "false", true}, - {"eq `xy` `xy`", "true", true}, - {"eq `xy` `xyz`", "false", true}, - {"eq .Uthree .Uthree", "true", true}, - {"eq .Uthree .Ufour", "false", true}, - {"eq 3 4 5 6 3", "true", true}, - {"eq 3 4 5 6 7", "false", true}, - {"ne true true", "false", true}, - {"ne true false", "true", true}, - {"ne 1+2i 1+2i", "false", true}, - {"ne 1+2i 1+3i", "true", true}, - {"ne 1.5 1.5", "false", true}, - {"ne 1.5 2.5", "true", true}, - {"ne 1 1", "false", true}, - {"ne 1 2", "true", true}, - {"ne `xy` `xy`", "false", true}, - {"ne `xy` `xyz`", "true", true}, - {"ne .Uthree .Uthree", "false", true}, - {"ne .Uthree .Ufour", "true", true}, - {"lt 1.5 1.5", "false", true}, - {"lt 1.5 2.5", "true", true}, - {"lt 1 1", "false", true}, - {"lt 1 2", "true", true}, - {"lt `xy` `xy`", "false", true}, - {"lt `xy` `xyz`", "true", true}, - {"lt .Uthree .Uthree", "false", true}, - {"lt .Uthree .Ufour", "true", true}, - {"le 1.5 1.5", "true", true}, - {"le 1.5 2.5", "true", true}, - {"le 2.5 1.5", "false", true}, - {"le 1 1", "true", true}, - {"le 1 2", "true", true}, - {"le 2 1", "false", true}, - {"le `xy` `xy`", "true", true}, - {"le `xy` `xyz`", "true", true}, - {"le `xyz` `xy`", "false", true}, - {"le .Uthree .Uthree", "true", true}, - {"le .Uthree .Ufour", "true", true}, - {"le .Ufour .Uthree", "false", true}, - {"gt 1.5 1.5", "false", 
true}, - {"gt 1.5 2.5", "false", true}, - {"gt 1 1", "false", true}, - {"gt 2 1", "true", true}, - {"gt 1 2", "false", true}, - {"gt `xy` `xy`", "false", true}, - {"gt `xy` `xyz`", "false", true}, - {"gt .Uthree .Uthree", "false", true}, - {"gt .Uthree .Ufour", "false", true}, - {"gt .Ufour .Uthree", "true", true}, - {"ge 1.5 1.5", "true", true}, - {"ge 1.5 2.5", "false", true}, - {"ge 2.5 1.5", "true", true}, - {"ge 1 1", "true", true}, - {"ge 1 2", "false", true}, - {"ge 2 1", "true", true}, - {"ge `xy` `xy`", "true", true}, - {"ge `xy` `xyz`", "false", true}, - {"ge `xyz` `xy`", "true", true}, - {"ge .Uthree .Uthree", "true", true}, - {"ge .Uthree .Ufour", "false", true}, - {"ge .Ufour .Uthree", "true", true}, - // Mixing signed and unsigned integers. - {"eq .Uthree .Three", "true", true}, - {"eq .Three .Uthree", "true", true}, - {"le .Uthree .Three", "true", true}, - {"le .Three .Uthree", "true", true}, - {"ge .Uthree .Three", "true", true}, - {"ge .Three .Uthree", "true", true}, - {"lt .Uthree .Three", "false", true}, - {"lt .Three .Uthree", "false", true}, - {"gt .Uthree .Three", "false", true}, - {"gt .Three .Uthree", "false", true}, - {"eq .Ufour .Three", "false", true}, - {"lt .Ufour .Three", "false", true}, - {"gt .Ufour .Three", "true", true}, - {"eq .NegOne .Uthree", "false", true}, - {"eq .Uthree .NegOne", "false", true}, - {"ne .NegOne .Uthree", "true", true}, - {"ne .Uthree .NegOne", "true", true}, - {"lt .NegOne .Uthree", "true", true}, - {"lt .Uthree .NegOne", "false", true}, - {"le .NegOne .Uthree", "true", true}, - {"le .Uthree .NegOne", "false", true}, - {"gt .NegOne .Uthree", "false", true}, - {"gt .Uthree .NegOne", "true", true}, - {"ge .NegOne .Uthree", "false", true}, - {"ge .Uthree .NegOne", "true", true}, - {"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule. - {"eq (index `x` 0) 'y'", "false", true}, - // Errors - {"eq `xy` 1", "", false}, // Different types. - {"eq 2 2.0", "", false}, // Different types. 
- {"lt true true", "", false}, // Unordered types. - {"lt 1+0i 1+0i", "", false}, // Unordered types. -} - -func TestComparison(t *testing.T) { - b := new(bytes.Buffer) - var cmpStruct = struct { - Uthree, Ufour uint - NegOne, Three int - }{3, 4, -1, 3} - for _, test := range cmpTests { - text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr) - tmpl, err := New("empty").Parse(text) - if err != nil { - t.Fatalf("%q: %s", test.expr, err) - } - b.Reset() - err = tmpl.Execute(b, &cmpStruct) - if test.ok && err != nil { - t.Errorf("%s errored incorrectly: %s", test.expr, err) - continue - } - if !test.ok && err == nil { - t.Errorf("%s did not error", test.expr) - continue - } - if b.String() != test.truth { - t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String()) - } - } -} diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod new file mode 100644 index 00000000..a70670ae --- /dev/null +++ b/vendor/github.com/alecthomas/template/go.mod @@ -0,0 +1 @@ +module github.com/alecthomas/template diff --git a/vendor/github.com/alecthomas/template/multi_test.go b/vendor/github.com/alecthomas/template/multi_test.go deleted file mode 100644 index 8d103623..00000000 --- a/vendor/github.com/alecthomas/template/multi_test.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -// Tests for mulitple-template parsing and execution. 
- -import ( - "bytes" - "fmt" - "strings" - "testing" - - "github.com/alecthomas/template/parse" -) - -const ( - noError = true - hasError = false -) - -type multiParseTest struct { - name string - input string - ok bool - names []string - results []string -} - -var multiParseTests = []multiParseTest{ - {"empty", "", noError, - nil, - nil}, - {"one", `{{define "foo"}} FOO {{end}}`, noError, - []string{"foo"}, - []string{" FOO "}}, - {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError, - []string{"foo", "bar"}, - []string{" FOO ", " BAR "}}, - // errors - {"missing end", `{{define "foo"}} FOO `, hasError, - nil, - nil}, - {"malformed name", `{{define "foo}} FOO `, hasError, - nil, - nil}, -} - -func TestMultiParse(t *testing.T) { - for _, test := range multiParseTests { - template, err := New("root").Parse(test.input) - switch { - case err == nil && !test.ok: - t.Errorf("%q: expected error; got none", test.name) - continue - case err != nil && test.ok: - t.Errorf("%q: unexpected error: %v", test.name, err) - continue - case err != nil && !test.ok: - // expected error, got one - if *debug { - fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) - } - continue - } - if template == nil { - continue - } - if len(template.tmpl) != len(test.names)+1 { // +1 for root - t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl)) - continue - } - for i, name := range test.names { - tmpl, ok := template.tmpl[name] - if !ok { - t.Errorf("%s: can't find template %q", test.name, name) - continue - } - result := tmpl.Root.String() - if result != test.results[i] { - t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i]) - } - } - } -} - -var multiExecTests = []execTest{ - {"empty", "", "", nil, true}, - {"text", "some text", "some text", nil, true}, - {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true}, - {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, 
true}, - {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true}, - {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true}, - {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true}, - {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true}, - {"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true}, - - // User-defined function: test argument evaluator. - {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true}, - {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true}, -} - -// These strings are also in testdata/*. -const multiText1 = ` - {{define "x"}}TEXT{{end}} - {{define "dotV"}}{{.V}}{{end}} -` - -const multiText2 = ` - {{define "dot"}}{{.}}{{end}} - {{define "nested"}}{{template "dot" .}}{{end}} -` - -func TestMultiExecute(t *testing.T) { - // Declare a couple of templates first. - template, err := New("root").Parse(multiText1) - if err != nil { - t.Fatalf("parse error for 1: %s", err) - } - _, err = template.Parse(multiText2) - if err != nil { - t.Fatalf("parse error for 2: %s", err) - } - testExecute(multiExecTests, template, t) -} - -func TestParseFiles(t *testing.T) { - _, err := ParseFiles("DOES NOT EXIST") - if err == nil { - t.Error("expected error for non-existent file; got none") - } - template := New("root") - _, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(multiExecTests, template, t) -} - -func TestParseGlob(t *testing.T) { - _, err := ParseGlob("DOES NOT EXIST") - if err == nil { - t.Error("expected error for non-existent file; got none") - } - _, err = New("error").ParseGlob("[x") - if err == nil { - t.Error("expected error for bad pattern; got none") - } - template := New("root") - _, err = template.ParseGlob("testdata/file*.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(multiExecTests, template, t) -} - 
-// In these tests, actual content (not just template definitions) comes from the parsed files. - -var templateFileExecTests = []execTest{ - {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true}, -} - -func TestParseFilesWithData(t *testing.T) { - template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(templateFileExecTests, template, t) -} - -func TestParseGlobWithData(t *testing.T) { - template, err := New("root").ParseGlob("testdata/tmpl*.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(templateFileExecTests, template, t) -} - -const ( - cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}` - cloneText2 = `{{define "b"}}b{{end}}` - cloneText3 = `{{define "c"}}root{{end}}` - cloneText4 = `{{define "c"}}clone{{end}}` -) - -func TestClone(t *testing.T) { - // Create some templates and clone the root. - root, err := New("root").Parse(cloneText1) - if err != nil { - t.Fatal(err) - } - _, err = root.Parse(cloneText2) - if err != nil { - t.Fatal(err) - } - clone := Must(root.Clone()) - // Add variants to both. - _, err = root.Parse(cloneText3) - if err != nil { - t.Fatal(err) - } - _, err = clone.Parse(cloneText4) - if err != nil { - t.Fatal(err) - } - // Verify that the clone is self-consistent. - for k, v := range clone.tmpl { - if k == clone.name && v.tmpl[k] != clone { - t.Error("clone does not contain root") - } - if v != v.tmpl[v.name] { - t.Errorf("clone does not contain self for %q", k) - } - } - // Execute root. - var b bytes.Buffer - err = root.ExecuteTemplate(&b, "a", 0) - if err != nil { - t.Fatal(err) - } - if b.String() != "broot" { - t.Errorf("expected %q got %q", "broot", b.String()) - } - // Execute copy. 
- b.Reset() - err = clone.ExecuteTemplate(&b, "a", 0) - if err != nil { - t.Fatal(err) - } - if b.String() != "bclone" { - t.Errorf("expected %q got %q", "bclone", b.String()) - } -} - -func TestAddParseTree(t *testing.T) { - // Create some templates. - root, err := New("root").Parse(cloneText1) - if err != nil { - t.Fatal(err) - } - _, err = root.Parse(cloneText2) - if err != nil { - t.Fatal(err) - } - // Add a new parse tree. - tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins) - if err != nil { - t.Fatal(err) - } - added, err := root.AddParseTree("c", tree["c"]) - // Execute. - var b bytes.Buffer - err = added.ExecuteTemplate(&b, "a", 0) - if err != nil { - t.Fatal(err) - } - if b.String() != "broot" { - t.Errorf("expected %q got %q", "broot", b.String()) - } -} - -// Issue 7032 -func TestAddParseTreeToUnparsedTemplate(t *testing.T) { - master := "{{define \"master\"}}{{end}}" - tmpl := New("master") - tree, err := parse.Parse("master", master, "", "", nil) - if err != nil { - t.Fatalf("unexpected parse err: %v", err) - } - masterTree := tree["master"] - tmpl.AddParseTree("master", masterTree) // used to panic -} - -func TestRedefinition(t *testing.T) { - var tmpl *Template - var err error - if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil { - t.Fatalf("parse 1: %v", err) - } - if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err == nil { - t.Fatal("expected error") - } - if !strings.Contains(err.Error(), "redefinition") { - t.Fatalf("expected redefinition error; got %v", err) - } - if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err == nil { - t.Fatal("expected error") - } - if !strings.Contains(err.Error(), "redefinition") { - t.Fatalf("expected redefinition error; got %v", err) - } -} diff --git a/vendor/github.com/alecthomas/template/parse/lex_test.go b/vendor/github.com/alecthomas/template/parse/lex_test.go deleted file mode 100644 index 3b921076..00000000 --- 
a/vendor/github.com/alecthomas/template/parse/lex_test.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "fmt" - "testing" -) - -// Make the types prettyprint. -var itemName = map[itemType]string{ - itemError: "error", - itemBool: "bool", - itemChar: "char", - itemCharConstant: "charconst", - itemComplex: "complex", - itemColonEquals: ":=", - itemEOF: "EOF", - itemField: "field", - itemIdentifier: "identifier", - itemLeftDelim: "left delim", - itemLeftParen: "(", - itemNumber: "number", - itemPipe: "pipe", - itemRawString: "raw string", - itemRightDelim: "right delim", - itemElideNewline: "elide newline", - itemRightParen: ")", - itemSpace: "space", - itemString: "string", - itemVariable: "variable", - - // keywords - itemDot: ".", - itemDefine: "define", - itemElse: "else", - itemIf: "if", - itemEnd: "end", - itemNil: "nil", - itemRange: "range", - itemTemplate: "template", - itemWith: "with", -} - -func (i itemType) String() string { - s := itemName[i] - if s == "" { - return fmt.Sprintf("item%d", int(i)) - } - return s -} - -type lexTest struct { - name string - input string - items []item -} - -var ( - tEOF = item{itemEOF, 0, ""} - tFor = item{itemIdentifier, 0, "for"} - tLeft = item{itemLeftDelim, 0, "{{"} - tLpar = item{itemLeftParen, 0, "("} - tPipe = item{itemPipe, 0, "|"} - tQuote = item{itemString, 0, `"abc \n\t\" "`} - tRange = item{itemRange, 0, "range"} - tRight = item{itemRightDelim, 0, "}}"} - tElideNewline = item{itemElideNewline, 0, "\\"} - tRpar = item{itemRightParen, 0, ")"} - tSpace = item{itemSpace, 0, " "} - raw = "`" + `abc\n\t\" ` + "`" - tRawQuote = item{itemRawString, 0, raw} -) - -var lexTests = []lexTest{ - {"empty", "", []item{tEOF}}, - {"spaces", " \t\n", []item{{itemText, 0, " \t\n"}, tEOF}}, - {"text", `now is the time`, []item{{itemText, 0, "now is the 
time"}, tEOF}}, - {"elide newline", "{{}}\\", []item{tLeft, tRight, tElideNewline, tEOF}}, - {"text with comment", "hello-{{/* this is a comment */}}-world", []item{ - {itemText, 0, "hello-"}, - {itemText, 0, "-world"}, - tEOF, - }}, - {"punctuation", "{{,@% }}", []item{ - tLeft, - {itemChar, 0, ","}, - {itemChar, 0, "@"}, - {itemChar, 0, "%"}, - tSpace, - tRight, - tEOF, - }}, - {"parens", "{{((3))}}", []item{ - tLeft, - tLpar, - tLpar, - {itemNumber, 0, "3"}, - tRpar, - tRpar, - tRight, - tEOF, - }}, - {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}}, - {"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}}, - {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}}, - {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}}, - {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{ - tLeft, - {itemNumber, 0, "1"}, - tSpace, - {itemNumber, 0, "02"}, - tSpace, - {itemNumber, 0, "0x14"}, - tSpace, - {itemNumber, 0, "-7.2i"}, - tSpace, - {itemNumber, 0, "1e3"}, - tSpace, - {itemNumber, 0, "+1.2e-4"}, - tSpace, - {itemNumber, 0, "4.2i"}, - tSpace, - {itemComplex, 0, "1+2i"}, - tRight, - tEOF, - }}, - {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{ - tLeft, - {itemCharConstant, 0, `'a'`}, - tSpace, - {itemCharConstant, 0, `'\n'`}, - tSpace, - {itemCharConstant, 0, `'\''`}, - tSpace, - {itemCharConstant, 0, `'\\'`}, - tSpace, - {itemCharConstant, 0, `'\u00FF'`}, - tSpace, - {itemCharConstant, 0, `'\xFF'`}, - tSpace, - {itemCharConstant, 0, `'本'`}, - tRight, - tEOF, - }}, - {"bools", "{{true false}}", []item{ - tLeft, - {itemBool, 0, "true"}, - tSpace, - {itemBool, 0, "false"}, - tRight, - tEOF, - }}, - {"dot", "{{.}}", []item{ - tLeft, - {itemDot, 0, "."}, - tRight, - tEOF, - }}, - {"nil", "{{nil}}", []item{ - tLeft, - {itemNil, 0, "nil"}, - tRight, - tEOF, - }}, - {"dots", "{{.x . 
.2 .x.y.z}}", []item{ - tLeft, - {itemField, 0, ".x"}, - tSpace, - {itemDot, 0, "."}, - tSpace, - {itemNumber, 0, ".2"}, - tSpace, - {itemField, 0, ".x"}, - {itemField, 0, ".y"}, - {itemField, 0, ".z"}, - tRight, - tEOF, - }}, - {"keywords", "{{range if else end with}}", []item{ - tLeft, - {itemRange, 0, "range"}, - tSpace, - {itemIf, 0, "if"}, - tSpace, - {itemElse, 0, "else"}, - tSpace, - {itemEnd, 0, "end"}, - tSpace, - {itemWith, 0, "with"}, - tRight, - tEOF, - }}, - {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{ - tLeft, - {itemVariable, 0, "$c"}, - tSpace, - {itemColonEquals, 0, ":="}, - tSpace, - {itemIdentifier, 0, "printf"}, - tSpace, - {itemVariable, 0, "$"}, - tSpace, - {itemVariable, 0, "$hello"}, - tSpace, - {itemVariable, 0, "$23"}, - tSpace, - {itemVariable, 0, "$"}, - tSpace, - {itemVariable, 0, "$var"}, - {itemField, 0, ".Field"}, - tSpace, - {itemField, 0, ".Method"}, - tRight, - tEOF, - }}, - {"variable invocation", "{{$x 23}}", []item{ - tLeft, - {itemVariable, 0, "$x"}, - tSpace, - {itemNumber, 0, "23"}, - tRight, - tEOF, - }}, - {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{ - {itemText, 0, "intro "}, - tLeft, - {itemIdentifier, 0, "echo"}, - tSpace, - {itemIdentifier, 0, "hi"}, - tSpace, - {itemNumber, 0, "1.2"}, - tSpace, - tPipe, - {itemIdentifier, 0, "noargs"}, - tPipe, - {itemIdentifier, 0, "args"}, - tSpace, - {itemNumber, 0, "1"}, - tSpace, - {itemString, 0, `"hi"`}, - tRight, - {itemText, 0, " outro"}, - tEOF, - }}, - {"declaration", "{{$v := 3}}", []item{ - tLeft, - {itemVariable, 0, "$v"}, - tSpace, - {itemColonEquals, 0, ":="}, - tSpace, - {itemNumber, 0, "3"}, - tRight, - tEOF, - }}, - {"2 declarations", "{{$v , $w := 3}}", []item{ - tLeft, - {itemVariable, 0, "$v"}, - tSpace, - {itemChar, 0, ","}, - tSpace, - {itemVariable, 0, "$w"}, - tSpace, - {itemColonEquals, 0, ":="}, - tSpace, - {itemNumber, 0, "3"}, - tRight, - tEOF, - }}, - {"field of parenthesized expression", 
"{{(.X).Y}}", []item{ - tLeft, - tLpar, - {itemField, 0, ".X"}, - tRpar, - {itemField, 0, ".Y"}, - tRight, - tEOF, - }}, - // errors - {"badchar", "#{{\x01}}", []item{ - {itemText, 0, "#"}, - tLeft, - {itemError, 0, "unrecognized character in action: U+0001"}, - }}, - {"unclosed action", "{{\n}}", []item{ - tLeft, - {itemError, 0, "unclosed action"}, - }}, - {"EOF in action", "{{range", []item{ - tLeft, - tRange, - {itemError, 0, "unclosed action"}, - }}, - {"unclosed quote", "{{\"\n\"}}", []item{ - tLeft, - {itemError, 0, "unterminated quoted string"}, - }}, - {"unclosed raw quote", "{{`xx\n`}}", []item{ - tLeft, - {itemError, 0, "unterminated raw quoted string"}, - }}, - {"unclosed char constant", "{{'\n}}", []item{ - tLeft, - {itemError, 0, "unterminated character constant"}, - }}, - {"bad number", "{{3k}}", []item{ - tLeft, - {itemError, 0, `bad number syntax: "3k"`}, - }}, - {"unclosed paren", "{{(3}}", []item{ - tLeft, - tLpar, - {itemNumber, 0, "3"}, - {itemError, 0, `unclosed left paren`}, - }}, - {"extra right paren", "{{3)}}", []item{ - tLeft, - {itemNumber, 0, "3"}, - tRpar, - {itemError, 0, `unexpected right paren U+0029 ')'`}, - }}, - - // Fixed bugs - // Many elements in an action blew the lookahead until - // we made lexInsideAction not loop. - {"long pipeline deadlock", "{{|||||}}", []item{ - tLeft, - tPipe, - tPipe, - tPipe, - tPipe, - tPipe, - tRight, - tEOF, - }}, - {"text with bad comment", "hello-{{/*/}}-world", []item{ - {itemText, 0, "hello-"}, - {itemError, 0, `unclosed comment`}, - }}, - {"text with comment close separted from delim", "hello-{{/* */ }}-world", []item{ - {itemText, 0, "hello-"}, - {itemError, 0, `comment ends before closing delimiter`}, - }}, - // This one is an error that we can't catch because it breaks templates with - // minimized JavaScript. Should have fixed it before Go 1.1. 
- {"unmatched right delimiter", "hello-{.}}-world", []item{ - {itemText, 0, "hello-{.}}-world"}, - tEOF, - }}, -} - -// collect gathers the emitted items into a slice. -func collect(t *lexTest, left, right string) (items []item) { - l := lex(t.name, t.input, left, right) - for { - item := l.nextItem() - items = append(items, item) - if item.typ == itemEOF || item.typ == itemError { - break - } - } - return -} - -func equal(i1, i2 []item, checkPos bool) bool { - if len(i1) != len(i2) { - return false - } - for k := range i1 { - if i1[k].typ != i2[k].typ { - return false - } - if i1[k].val != i2[k].val { - return false - } - if checkPos && i1[k].pos != i2[k].pos { - return false - } - } - return true -} - -func TestLex(t *testing.T) { - for _, test := range lexTests { - items := collect(&test, "", "") - if !equal(items, test.items, false) { - t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items) - } - } -} - -// Some easy cases from above, but with delimiters $$ and @@ -var lexDelimTests = []lexTest{ - {"punctuation", "$$,@%{{}}@@", []item{ - tLeftDelim, - {itemChar, 0, ","}, - {itemChar, 0, "@"}, - {itemChar, 0, "%"}, - {itemChar, 0, "{"}, - {itemChar, 0, "{"}, - {itemChar, 0, "}"}, - {itemChar, 0, "}"}, - tRightDelim, - tEOF, - }}, - {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}}, - {"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}}, - {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}}, - {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}}, -} - -var ( - tLeftDelim = item{itemLeftDelim, 0, "$$"} - tRightDelim = item{itemRightDelim, 0, "@@"} -) - -func TestDelims(t *testing.T) { - for _, test := range lexDelimTests { - items := collect(&test, "$$", "@@") - if !equal(items, test.items, false) { - t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items) - } - } -} - -var lexPosTests = []lexTest{ - {"empty", "", []item{tEOF}}, - {"punctuation", 
"{{,@%#}}", []item{ - {itemLeftDelim, 0, "{{"}, - {itemChar, 2, ","}, - {itemChar, 3, "@"}, - {itemChar, 4, "%"}, - {itemChar, 5, "#"}, - {itemRightDelim, 6, "}}"}, - {itemEOF, 8, ""}, - }}, - {"sample", "0123{{hello}}xyz", []item{ - {itemText, 0, "0123"}, - {itemLeftDelim, 4, "{{"}, - {itemIdentifier, 6, "hello"}, - {itemRightDelim, 11, "}}"}, - {itemText, 13, "xyz"}, - {itemEOF, 16, ""}, - }}, -} - -// The other tests don't check position, to make the test cases easier to construct. -// This one does. -func TestPos(t *testing.T) { - for _, test := range lexPosTests { - items := collect(&test, "", "") - if !equal(items, test.items, true) { - t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items) - if len(items) == len(test.items) { - // Detailed print; avoid item.String() to expose the position value. - for i := range items { - if !equal(items[i:i+1], test.items[i:i+1], true) { - i1 := items[i] - i2 := test.items[i] - t.Errorf("\t#%d: got {%v %d %q} expected {%v %d %q}", i, i1.typ, i1.pos, i1.val, i2.typ, i2.pos, i2.val) - } - } - } - } - } -} diff --git a/vendor/github.com/alecthomas/template/parse/parse_test.go b/vendor/github.com/alecthomas/template/parse/parse_test.go deleted file mode 100644 index c73640fb..00000000 --- a/vendor/github.com/alecthomas/template/parse/parse_test.go +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "flag" - "fmt" - "strings" - "testing" -) - -var debug = flag.Bool("debug", false, "show the errors produced by the main tests") - -type numberTest struct { - text string - isInt bool - isUint bool - isFloat bool - isComplex bool - int64 - uint64 - float64 - complex128 -} - -var numberTests = []numberTest{ - // basics - {"0", true, true, true, false, 0, 0, 0, 0}, - {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint. 
- {"73", true, true, true, false, 73, 73, 73, 0}, - {"073", true, true, true, false, 073, 073, 073, 0}, - {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0}, - {"-73", true, false, true, false, -73, 0, -73, 0}, - {"+73", true, false, true, false, 73, 0, 73, 0}, - {"100", true, true, true, false, 100, 100, 100, 0}, - {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0}, - {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0}, - {"-1.2", false, false, true, false, 0, 0, -1.2, 0}, - {"1e19", false, true, true, false, 0, 1e19, 1e19, 0}, - {"-1e19", false, false, true, false, 0, 0, -1e19, 0}, - {"4i", false, false, false, true, 0, 0, 0, 4i}, - {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i}, - {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal! - // complex with 0 imaginary are float (and maybe integer) - {"0i", true, true, true, true, 0, 0, 0, 0}, - {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2}, - {"-12+0i", true, false, true, true, -12, 0, -12, -12}, - {"13+0i", true, true, true, true, 13, 13, 13, 13}, - // funny bases - {"0123", true, true, true, false, 0123, 0123, 0123, 0}, - {"-0x0", true, true, true, false, 0, 0, 0, 0}, - {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0}, - // character constants - {`'a'`, true, true, true, false, 'a', 'a', 'a', 0}, - {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0}, - {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0}, - {`'\''`, true, true, true, false, '\'', '\'', '\'', 0}, - {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0}, - {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0}, - {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0}, - {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0}, - // some broken syntax - {text: "+-2"}, - {text: "0x123."}, - {text: "1e."}, - {text: "0xi."}, - {text: "1+2."}, - {text: "'x"}, - {text: "'xx'"}, - // Issue 8622 - 0xe parsed as floating point. Very embarrassing. 
- {"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0}, -} - -func TestNumberParse(t *testing.T) { - for _, test := range numberTests { - // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output - // because imaginary comes out as a number. - var c complex128 - typ := itemNumber - var tree *Tree - if test.text[0] == '\'' { - typ = itemCharConstant - } else { - _, err := fmt.Sscan(test.text, &c) - if err == nil { - typ = itemComplex - } - } - n, err := tree.newNumber(0, test.text, typ) - ok := test.isInt || test.isUint || test.isFloat || test.isComplex - if ok && err != nil { - t.Errorf("unexpected error for %q: %s", test.text, err) - continue - } - if !ok && err == nil { - t.Errorf("expected error for %q", test.text) - continue - } - if !ok { - if *debug { - fmt.Printf("%s\n\t%s\n", test.text, err) - } - continue - } - if n.IsComplex != test.isComplex { - t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex) - } - if test.isInt { - if !n.IsInt { - t.Errorf("expected integer for %q", test.text) - } - if n.Int64 != test.int64 { - t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64) - } - } else if n.IsInt { - t.Errorf("did not expect integer for %q", test.text) - } - if test.isUint { - if !n.IsUint { - t.Errorf("expected unsigned integer for %q", test.text) - } - if n.Uint64 != test.uint64 { - t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64) - } - } else if n.IsUint { - t.Errorf("did not expect unsigned integer for %q", test.text) - } - if test.isFloat { - if !n.IsFloat { - t.Errorf("expected float for %q", test.text) - } - if n.Float64 != test.float64 { - t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64) - } - } else if n.IsFloat { - t.Errorf("did not expect float for %q", test.text) - } - if test.isComplex { - if !n.IsComplex { - t.Errorf("expected complex for %q", test.text) - } - if n.Complex128 != test.complex128 { - 
t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128) - } - } else if n.IsComplex { - t.Errorf("did not expect complex for %q", test.text) - } - } -} - -type parseTest struct { - name string - input string - ok bool - result string // what the user would see in an error message. -} - -const ( - noError = true - hasError = false -) - -var parseTests = []parseTest{ - {"empty", "", noError, - ``}, - {"comment", "{{/*\n\n\n*/}}", noError, - ``}, - {"spaces", " \t\n", noError, - `" \t\n"`}, - {"text", "some text", noError, - `"some text"`}, - {"emptyAction", "{{}}", hasError, - `{{}}`}, - {"field", "{{.X}}", noError, - `{{.X}}`}, - {"simple command", "{{printf}}", noError, - `{{printf}}`}, - {"$ invocation", "{{$}}", noError, - "{{$}}"}, - {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError, - "{{with $x := 3}}{{$x 23}}{{end}}"}, - {"variable with fields", "{{$.I}}", noError, - "{{$.I}}"}, - {"multi-word command", "{{printf `%d` 23}}", noError, - "{{printf `%d` 23}}"}, - {"pipeline", "{{.X|.Y}}", noError, - `{{.X | .Y}}`}, - {"pipeline with decl", "{{$x := .X|.Y}}", noError, - `{{$x := .X | .Y}}`}, - {"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError, - `{{.X (.Y .Z) (.A | .B .C) (.E)}}`}, - {"field applied to parentheses", "{{(.Y .Z).Field}}", noError, - `{{(.Y .Z).Field}}`}, - {"simple if", "{{if .X}}hello{{end}}", noError, - `{{if .X}}"hello"{{end}}`}, - {"if with else", "{{if .X}}true{{else}}false{{end}}", noError, - `{{if .X}}"true"{{else}}"false"{{end}}`}, - {"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError, - `{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`}, - {"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError, - `"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`}, - {"simple range", "{{range .X}}hello{{end}}", noError, - `{{range .X}}"hello"{{end}}`}, - {"chained field range", "{{range .X.Y.Z}}hello{{end}}", 
noError, - `{{range .X.Y.Z}}"hello"{{end}}`}, - {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError, - `{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`}, - {"range with else", "{{range .X}}true{{else}}false{{end}}", noError, - `{{range .X}}"true"{{else}}"false"{{end}}`}, - {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError, - `{{range .X | .M}}"true"{{else}}"false"{{end}}`}, - {"range []int", "{{range .SI}}{{.}}{{end}}", noError, - `{{range .SI}}{{.}}{{end}}`}, - {"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError, - `{{range $x := .SI}}{{.}}{{end}}`}, - {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError, - `{{range $x, $y := .SI}}{{.}}{{end}}`}, - {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError, - `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`}, - {"template", "{{template `x`}}", noError, - `{{template "x"}}`}, - {"template with arg", "{{template `x` .Y}}", noError, - `{{template "x" .Y}}`}, - {"with", "{{with .X}}hello{{end}}", noError, - `{{with .X}}"hello"{{end}}`}, - {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError, - `{{with .X}}"hello"{{else}}"goodbye"{{end}}`}, - {"elide newline", "{{true}}\\\n ", noError, - `{{true}}" "`}, - // Errors. 
- {"unclosed action", "hello{{range", hasError, ""}, - {"unmatched end", "{{end}}", hasError, ""}, - {"missing end", "hello{{range .x}}", hasError, ""}, - {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""}, - {"undefined function", "hello{{undefined}}", hasError, ""}, - {"undefined variable", "{{$x}}", hasError, ""}, - {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""}, - {"variable undefined in template", "{{template $v}}", hasError, ""}, - {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""}, - {"template with field ref", "{{template .X}}", hasError, ""}, - {"template with var", "{{template $v}}", hasError, ""}, - {"invalid punctuation", "{{printf 3, 4}}", hasError, ""}, - {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""}, - {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""}, - {"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""}, - {"adjacent args", "{{printf 3`x`}}", hasError, ""}, - {"adjacent args with .", "{{printf `x`.}}", hasError, ""}, - {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""}, - {"invalid newline elision", "{{true}}\\{{true}}", hasError, ""}, - // Equals (and other chars) do not assignments make (yet). - {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"}, - {"bug0b", "{{$x = 1}}{{$x}}", hasError, ""}, - {"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""}, - {"bug0d", "{{$x % 3}}{{$x}}", hasError, ""}, - // Check the parse fails for := rather than comma. - {"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""}, - // Another bug: variable read must ignore following punctuation. - {"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""}, // ! is just illegal here. - {"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""}, // $x+2 should not parse as ($x) (+2). - {"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space. 
-} - -var builtins = map[string]interface{}{ - "printf": fmt.Sprintf, -} - -func testParse(doCopy bool, t *testing.T) { - textFormat = "%q" - defer func() { textFormat = "%s" }() - for _, test := range parseTests { - tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins) - switch { - case err == nil && !test.ok: - t.Errorf("%q: expected error; got none", test.name) - continue - case err != nil && test.ok: - t.Errorf("%q: unexpected error: %v", test.name, err) - continue - case err != nil && !test.ok: - // expected error, got one - if *debug { - fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) - } - continue - } - var result string - if doCopy { - result = tmpl.Root.Copy().String() - } else { - result = tmpl.Root.String() - } - if result != test.result { - t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result) - } - } -} - -func TestParse(t *testing.T) { - testParse(false, t) -} - -// Same as TestParse, but we copy the node first -func TestParseCopy(t *testing.T) { - testParse(true, t) -} - -type isEmptyTest struct { - name string - input string - empty bool -} - -var isEmptyTests = []isEmptyTest{ - {"empty", ``, true}, - {"nonempty", `hello`, false}, - {"spaces only", " \t\n \t\n", true}, - {"definition", `{{define "x"}}something{{end}}`, true}, - {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true}, - {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false}, - {"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false}, -} - -func TestIsEmpty(t *testing.T) { - if !IsEmptyTree(nil) { - t.Errorf("nil tree is not empty") - } - for _, test := range isEmptyTests { - tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil) - if err != nil { - t.Errorf("%q: unexpected error: %v", test.name, err) - continue - } - if empty := IsEmptyTree(tree.Root); 
empty != test.empty { - t.Errorf("%q: expected %t got %t", test.name, test.empty, empty) - } - } -} - -func TestErrorContextWithTreeCopy(t *testing.T) { - tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil) - if err != nil { - t.Fatalf("unexpected tree parse failure: %v", err) - } - treeCopy := tree.Copy() - wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0]) - gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0]) - if wantLocation != gotLocation { - t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation) - } - if wantContext != gotContext { - t.Errorf("wrong error location want %q got %q", wantContext, gotContext) - } -} - -// All failures, and the result is a string that must appear in the error message. -var errorTests = []parseTest{ - // Check line numbers are accurate. - {"unclosed1", - "line1\n{{", - hasError, `unclosed1:2: unexpected unclosed action in command`}, - {"unclosed2", - "line1\n{{define `x`}}line2\n{{", - hasError, `unclosed2:3: unexpected unclosed action in command`}, - // Specific errors. 
- {"function", - "{{foo}}", - hasError, `function "foo" not defined`}, - {"comment", - "{{/*}}", - hasError, `unclosed comment`}, - {"lparen", - "{{.X (1 2 3}}", - hasError, `unclosed left paren`}, - {"rparen", - "{{.X 1 2 3)}}", - hasError, `unexpected ")"`}, - {"space", - "{{`x`3}}", - hasError, `missing space?`}, - {"idchar", - "{{a#}}", - hasError, `'#'`}, - {"charconst", - "{{'a}}", - hasError, `unterminated character constant`}, - {"stringconst", - `{{"a}}`, - hasError, `unterminated quoted string`}, - {"rawstringconst", - "{{`a}}", - hasError, `unterminated raw quoted string`}, - {"number", - "{{0xi}}", - hasError, `number syntax`}, - {"multidefine", - "{{define `a`}}a{{end}}{{define `a`}}b{{end}}", - hasError, `multiple definition of template`}, - {"eof", - "{{range .X}}", - hasError, `unexpected EOF`}, - {"variable", - // Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration. - "{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}", - hasError, `unexpected ":="`}, - {"multidecl", - "{{$a,$b,$c := 23}}", - hasError, `too many declarations`}, - {"undefvar", - "{{$a}}", - hasError, `undefined variable`}, -} - -func TestErrors(t *testing.T) { - for _, test := range errorTests { - _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree)) - if err == nil { - t.Errorf("%q: expected error", test.name) - continue - } - if !strings.Contains(err.Error(), test.result) { - t.Errorf("%q: error %q does not contain %q", test.name, err, test.result) - } - } -} diff --git a/vendor/github.com/alecthomas/template/testdata/file1.tmpl b/vendor/github.com/alecthomas/template/testdata/file1.tmpl deleted file mode 100644 index febf9d9f..00000000 --- a/vendor/github.com/alecthomas/template/testdata/file1.tmpl +++ /dev/null @@ -1,2 +0,0 @@ -{{define "x"}}TEXT{{end}} -{{define "dotV"}}{{.V}}{{end}} diff --git a/vendor/github.com/alecthomas/template/testdata/file2.tmpl b/vendor/github.com/alecthomas/template/testdata/file2.tmpl deleted 
file mode 100644 index 39bf6fb9..00000000 --- a/vendor/github.com/alecthomas/template/testdata/file2.tmpl +++ /dev/null @@ -1,2 +0,0 @@ -{{define "dot"}}{{.}}{{end}} -{{define "nested"}}{{template "dot" .}}{{end}} diff --git a/vendor/github.com/alecthomas/template/testdata/tmpl1.tmpl b/vendor/github.com/alecthomas/template/testdata/tmpl1.tmpl deleted file mode 100644 index b72b3a34..00000000 --- a/vendor/github.com/alecthomas/template/testdata/tmpl1.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -template1 -{{define "x"}}x{{end}} -{{template "y"}} diff --git a/vendor/github.com/alecthomas/template/testdata/tmpl2.tmpl b/vendor/github.com/alecthomas/template/testdata/tmpl2.tmpl deleted file mode 100644 index 16beba6e..00000000 --- a/vendor/github.com/alecthomas/template/testdata/tmpl2.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -template2 -{{define "y"}}y{{end}} -{{template "x"}} diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go index eaadeb80..cd439f51 100644 --- a/vendor/github.com/alecthomas/units/bytes.go +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -27,6 +27,7 @@ var ( // ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB // and KiB are both 1024. +// However "kB", which is the correct SI spelling of 1000 Bytes, is rejected. func ParseBase2Bytes(s string) (Base2Bytes, error) { n, err := ParseUnit(s, bytesUnitMap) if err != nil { @@ -39,6 +40,12 @@ func (b Base2Bytes) String() string { return ToString(int64(b), 1024, "iB", "B") } +func (b *Base2Bytes) UnmarshalText(text []byte) error { + n, err := ParseBase2Bytes(string(text)) + *b = n + return err +} + var ( metricBytesUnitMap = MakeUnitMap("B", "B", 1000) ) @@ -68,12 +75,13 @@ func ParseMetricBytes(s string) (MetricBytes, error) { return MetricBytes(n), err } +// TODO: represents 1000B as uppercase "KB", while SI standard requires "kB". 
func (m MetricBytes) String() string { return ToString(int64(m), 1000, "B", "B") } // ParseStrictBytes supports both iB and B suffixes for base 2 and metric, -// respectively. That is, KiB represents 1024 and KB represents 1000. +// respectively. That is, KiB represents 1024 and kB, KB represent 1000. func ParseStrictBytes(s string) (int64, error) { n, err := ParseUnit(s, bytesUnitMap) if err != nil { diff --git a/vendor/github.com/alecthomas/units/bytes_test.go b/vendor/github.com/alecthomas/units/bytes_test.go deleted file mode 100644 index 6cbc79de..00000000 --- a/vendor/github.com/alecthomas/units/bytes_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package units - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBase2BytesString(t *testing.T) { - assert.Equal(t, Base2Bytes(0).String(), "0B") - assert.Equal(t, Base2Bytes(1025).String(), "1KiB1B") - assert.Equal(t, Base2Bytes(1048577).String(), "1MiB1B") -} - -func TestParseBase2Bytes(t *testing.T) { - n, err := ParseBase2Bytes("0B") - assert.NoError(t, err) - assert.Equal(t, 0, int(n)) - n, err = ParseBase2Bytes("1KB") - assert.NoError(t, err) - assert.Equal(t, 1024, int(n)) - n, err = ParseBase2Bytes("1MB1KB25B") - assert.NoError(t, err) - assert.Equal(t, 1049625, int(n)) - n, err = ParseBase2Bytes("1.5MB") - assert.NoError(t, err) - assert.Equal(t, 1572864, int(n)) -} - -func TestMetricBytesString(t *testing.T) { - assert.Equal(t, MetricBytes(0).String(), "0B") - assert.Equal(t, MetricBytes(1001).String(), "1KB1B") - assert.Equal(t, MetricBytes(1001025).String(), "1MB1KB25B") -} - -func TestParseMetricBytes(t *testing.T) { - n, err := ParseMetricBytes("0B") - assert.NoError(t, err) - assert.Equal(t, 0, int(n)) - n, err = ParseMetricBytes("1KB1B") - assert.NoError(t, err) - assert.Equal(t, 1001, int(n)) - n, err = ParseMetricBytes("1MB1KB25B") - assert.NoError(t, err) - assert.Equal(t, 1001025, int(n)) - n, err = ParseMetricBytes("1.5MB") - assert.NoError(t, err) - assert.Equal(t, 
1500000, int(n)) -} diff --git a/vendor/github.com/alecthomas/units/go.mod b/vendor/github.com/alecthomas/units/go.mod new file mode 100644 index 00000000..f77ddea7 --- /dev/null +++ b/vendor/github.com/alecthomas/units/go.mod @@ -0,0 +1,5 @@ +module github.com/alecthomas/units + +go 1.15 + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/alecthomas/units/go.sum b/vendor/github.com/alecthomas/units/go.sum new file mode 100644 index 00000000..8fdee585 --- /dev/null +++ b/vendor/github.com/alecthomas/units/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go index 8234a9d5..99b2fa4f 100644 --- a/vendor/github.com/alecthomas/units/si.go +++ b/vendor/github.com/alecthomas/units/si.go @@ -14,13 +14,37 @@ const ( ) func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { - return map[string]float64{ - shortSuffix: 1, - "K" + suffix: float64(scale), + res := map[string]float64{ + shortSuffix: 1, + 
// see below for "k" / "K" "M" + suffix: float64(scale * scale), "G" + suffix: float64(scale * scale * scale), "T" + suffix: float64(scale * scale * scale * scale), "P" + suffix: float64(scale * scale * scale * scale * scale), "E" + suffix: float64(scale * scale * scale * scale * scale * scale), } + + // Standard SI prefixes use lowercase "k" for kilo = 1000. + // For compatibility, and to be fool-proof, we accept both "k" and "K" in metric mode. + // + // However, official binary prefixes are always capitalized - "KiB" - + // and we specifically never parse "kB" as 1024B because: + // + // (1) people pedantic enough to use lowercase according to SI unlikely to abuse "k" to mean 1024 :-) + // + // (2) Use of capital K for 1024 was an informal tradition predating IEC prefixes: + // "The binary meaning of the kilobyte for 1024 bytes typically uses the symbol KB, with an + // uppercase letter K." + // -- https://en.wikipedia.org/wiki/Kilobyte#Base_2_(1024_bytes) + // "Capitalization of the letter K became the de facto standard for binary notation, although this + // could not be extended to higher powers, and use of the lowercase k did persist.[13][14][15]" + // -- https://en.wikipedia.org/wiki/Binary_prefix#History + // See also the extensive https://en.wikipedia.org/wiki/Timeline_of_binary_prefixes. 
+ if scale == 1024 { + res["K"+suffix] = float64(scale) + } else { + res["k"+suffix] = float64(scale) + res["K"+suffix] = float64(scale) + } + return res } diff --git a/vendor/github.com/beorn7/perks/.gitignore b/vendor/github.com/beorn7/perks/.gitignore deleted file mode 100644 index 1bd9209a..00000000 --- a/vendor/github.com/beorn7/perks/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -*.prof diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md deleted file mode 100644 index fc057777..00000000 --- a/vendor/github.com/beorn7/perks/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Perks for Go (golang.org) - -Perks contains the Go package quantile that computes approximate quantiles over -an unbounded data stream within low memory and CPU bounds. - -For more information and examples, see: -http://godoc.org/github.com/bmizerany/perks - -A very special thank you and shout out to Graham Cormode (Rutgers University), -Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and -Divesh Srivastava (AT&T Labs–Research) for their research and publication of -[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) - -Thank you, also: -* Armon Dadgar (@armon) -* Andrew Gerrand (@nf) -* Brad Fitzpatrick (@bradfitz) -* Keith Rarick (@kr) - -FAQ: - -Q: Why not move the quantile package into the project root? -A: I want to add more packages to perks later. 
- -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/histogram/bench_test.go b/vendor/github.com/beorn7/perks/histogram/bench_test.go deleted file mode 100644 index 56c7e551..00000000 --- a/vendor/github.com/beorn7/perks/histogram/bench_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package histogram - -import ( - "math/rand" - "testing" -) - -func BenchmarkInsert10Bins(b *testing.B) { - b.StopTimer() - h := New(10) - b.StartTimer() - for i := 0; i < b.N; i++ { - f := rand.ExpFloat64() - h.Insert(f) - } -} - -func BenchmarkInsert100Bins(b *testing.B) { - b.StopTimer() - h := New(100) - b.StartTimer() - for i := 0; i < b.N; i++ { - f := rand.ExpFloat64() - h.Insert(f) - } -} diff --git a/vendor/github.com/beorn7/perks/histogram/histogram.go b/vendor/github.com/beorn7/perks/histogram/histogram.go deleted file mode 100644 index bef05c70..00000000 --- a/vendor/github.com/beorn7/perks/histogram/histogram.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package histogram provides a Go implementation of BigML's histogram package -// for Clojure/Java. It is currently experimental. -package histogram - -import ( - "container/heap" - "math" - "sort" -) - -type Bin struct { - Count int - Sum float64 -} - -func (b *Bin) Update(x *Bin) { - b.Count += x.Count - b.Sum += x.Sum -} - -func (b *Bin) Mean() float64 { - return b.Sum / float64(b.Count) -} - -type Bins []*Bin - -func (bs Bins) Len() int { return len(bs) } -func (bs Bins) Less(i, j int) bool { return bs[i].Mean() < bs[j].Mean() } -func (bs Bins) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } - -func (bs *Bins) Push(x interface{}) { - *bs = append(*bs, x.(*Bin)) -} - -func (bs *Bins) Pop() interface{} { - return bs.remove(len(*bs) - 1) -} - -func (bs *Bins) remove(n int) *Bin { - if n < 0 || len(*bs) < n { - return nil - } - x := (*bs)[n] - *bs = append((*bs)[:n], (*bs)[n+1:]...) 
- return x -} - -type Histogram struct { - res *reservoir -} - -func New(maxBins int) *Histogram { - return &Histogram{res: newReservoir(maxBins)} -} - -func (h *Histogram) Insert(f float64) { - h.res.insert(&Bin{1, f}) - h.res.compress() -} - -func (h *Histogram) Bins() Bins { - return h.res.bins -} - -type reservoir struct { - n int - maxBins int - bins Bins -} - -func newReservoir(maxBins int) *reservoir { - return &reservoir{maxBins: maxBins} -} - -func (r *reservoir) insert(bin *Bin) { - r.n += bin.Count - i := sort.Search(len(r.bins), func(i int) bool { - return r.bins[i].Mean() >= bin.Mean() - }) - if i < 0 || i == r.bins.Len() { - // TODO(blake): Maybe use an .insert(i, bin) instead of - // performing the extra work of a heap.Push. - heap.Push(&r.bins, bin) - return - } - r.bins[i].Update(bin) -} - -func (r *reservoir) compress() { - for r.bins.Len() > r.maxBins { - minGapIndex := -1 - minGap := math.MaxFloat64 - for i := 0; i < r.bins.Len()-1; i++ { - gap := gapWeight(r.bins[i], r.bins[i+1]) - if minGap > gap { - minGap = gap - minGapIndex = i - } - } - prev := r.bins[minGapIndex] - next := r.bins.remove(minGapIndex + 1) - prev.Update(next) - } -} - -func gapWeight(prev, next *Bin) float64 { - return next.Mean() - prev.Mean() -} diff --git a/vendor/github.com/beorn7/perks/histogram/histogram_test.go b/vendor/github.com/beorn7/perks/histogram/histogram_test.go deleted file mode 100644 index 0575ebee..00000000 --- a/vendor/github.com/beorn7/perks/histogram/histogram_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package histogram - -import ( - "math/rand" - "testing" -) - -func TestHistogram(t *testing.T) { - const numPoints = 1e6 - const maxBins = 3 - - h := New(maxBins) - for i := 0; i < numPoints; i++ { - f := rand.ExpFloat64() - h.Insert(f) - } - - bins := h.Bins() - if g := len(bins); g > maxBins { - t.Fatalf("got %d bins, wanted <= %d", g, maxBins) - } - - for _, b := range bins { - t.Logf("%+v", b) - } - - if g := count(h.Bins()); g != numPoints { - 
t.Fatalf("binned %d points, wanted %d", g, numPoints) - } -} - -func count(bins Bins) int { - binCounts := 0 - for _, b := range bins { - binCounts += b.Count - } - return binCounts -} diff --git a/vendor/github.com/beorn7/perks/quantile/bench_test.go b/vendor/github.com/beorn7/perks/quantile/bench_test.go deleted file mode 100644 index 0bd0e4e7..00000000 --- a/vendor/github.com/beorn7/perks/quantile/bench_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package quantile - -import ( - "testing" -) - -func BenchmarkInsertTargeted(b *testing.B) { - b.ReportAllocs() - - s := NewTargeted(Targets) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { - s := NewTargeted(TargetsSmallEpsilon) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertBiased(b *testing.B) { - s := NewLowBiased(0.01) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { - s := NewLowBiased(0.0001) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkQuery(b *testing.B) { - s := NewTargeted(Targets) - for i := float64(0); i < 1e6; i++ { - s.Insert(i) - } - b.ResetTimer() - n := float64(b.N) - for i := float64(0); i < n; i++ { - s.Query(i / n) - } -} - -func BenchmarkQuerySmallEpsilon(b *testing.B) { - s := NewTargeted(TargetsSmallEpsilon) - for i := float64(0); i < 1e6; i++ { - s.Insert(i) - } - b.ResetTimer() - n := float64(b.N) - for i := float64(0); i < n; i++ { - s.Query(i / n) - } -} diff --git a/vendor/github.com/beorn7/perks/quantile/example_test.go b/vendor/github.com/beorn7/perks/quantile/example_test.go deleted file mode 100644 index ab3293aa..00000000 --- a/vendor/github.com/beorn7/perks/quantile/example_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build go1.1 - -package quantile_test - -import ( - "bufio" - "fmt" - 
"log" - "os" - "strconv" - "time" - - "github.com/beorn7/perks/quantile" -) - -func Example_simple() { - ch := make(chan float64) - go sendFloats(ch) - - // Compute the 50th, 90th, and 99th percentile. - q := quantile.NewTargeted(map[float64]float64{ - 0.50: 0.005, - 0.90: 0.001, - 0.99: 0.0001, - }) - for v := range ch { - q.Insert(v) - } - - fmt.Println("perc50:", q.Query(0.50)) - fmt.Println("perc90:", q.Query(0.90)) - fmt.Println("perc99:", q.Query(0.99)) - fmt.Println("count:", q.Count()) - // Output: - // perc50: 5 - // perc90: 16 - // perc99: 223 - // count: 2388 -} - -func Example_mergeMultipleStreams() { - // Scenario: - // We have multiple database shards. On each shard, there is a process - // collecting query response times from the database logs and inserting - // them into a Stream (created via NewTargeted(0.90)), much like the - // Simple example. These processes expose a network interface for us to - // ask them to serialize and send us the results of their - // Stream.Samples so we may Merge and Query them. - // - // NOTES: - // * These sample sets are small, allowing us to get them - // across the network much faster than sending the entire list of data - // points. - // - // * For this to work correctly, we must supply the same quantiles - // a priori the process collecting the samples supplied to NewTargeted, - // even if we do not plan to query them all here. - ch := make(chan quantile.Samples) - getDBQuerySamples(ch) - q := quantile.NewTargeted(map[float64]float64{0.90: 0.001}) - for samples := range ch { - q.Merge(samples) - } - fmt.Println("perc90:", q.Query(0.90)) -} - -func Example_window() { - // Scenario: We want the 90th, 95th, and 99th percentiles for each - // minute. 
- - ch := make(chan float64) - go sendStreamValues(ch) - - tick := time.NewTicker(1 * time.Minute) - q := quantile.NewTargeted(map[float64]float64{ - 0.90: 0.001, - 0.95: 0.0005, - 0.99: 0.0001, - }) - for { - select { - case t := <-tick.C: - flushToDB(t, q.Samples()) - q.Reset() - case v := <-ch: - q.Insert(v) - } - } -} - -func sendStreamValues(ch chan float64) { - // Use your imagination -} - -func flushToDB(t time.Time, samples quantile.Samples) { - // Use your imagination -} - -// This is a stub for the above example. In reality this would hit the remote -// servers via http or something like it. -func getDBQuerySamples(ch chan quantile.Samples) {} - -func sendFloats(ch chan<- float64) { - f, err := os.Open("exampledata.txt") - if err != nil { - log.Fatal(err) - } - sc := bufio.NewScanner(f) - for sc.Scan() { - b := sc.Bytes() - v, err := strconv.ParseFloat(string(b), 64) - if err != nil { - log.Fatal(err) - } - ch <- v - } - if sc.Err() != nil { - log.Fatal(sc.Err()) - } - close(ch) -} diff --git a/vendor/github.com/beorn7/perks/quantile/stream_test.go b/vendor/github.com/beorn7/perks/quantile/stream_test.go deleted file mode 100644 index 85519509..00000000 --- a/vendor/github.com/beorn7/perks/quantile/stream_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package quantile - -import ( - "math" - "math/rand" - "sort" - "testing" -) - -var ( - Targets = map[float64]float64{ - 0.01: 0.001, - 0.10: 0.01, - 0.50: 0.05, - 0.90: 0.01, - 0.99: 0.001, - } - TargetsSmallEpsilon = map[float64]float64{ - 0.01: 0.0001, - 0.10: 0.001, - 0.50: 0.005, - 0.90: 0.001, - 0.99: 0.0001, - } - LowQuantiles = []float64{0.01, 0.1, 0.5} - HighQuantiles = []float64{0.99, 0.9, 0.5} -) - -const RelativeEpsilon = 0.01 - -func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for quantile, epsilon := range Targets { - n := float64(len(a)) - k := int(quantile * n) - if k < 1 { - k = 1 - } - lower := int((quantile - epsilon) * n) - if lower < 1 { - lower = 
1 - } - upper := int(math.Ceil((quantile + epsilon) * n)) - if upper > len(a) { - upper = len(a) - } - w, min, max := a[k-1], a[lower-1], a[upper-1] - if g := s.Query(quantile); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) - } - } -} - -func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for _, qu := range LowQuantiles { - n := float64(len(a)) - k := int(qu * n) - - lowerRank := int((1 - RelativeEpsilon) * qu * n) - upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) - w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] - if g := s.Query(qu); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) - } - } -} - -func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for _, qu := range HighQuantiles { - n := float64(len(a)) - k := int(qu * n) - - lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) - upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) - w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] - if g := s.Query(qu); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) - } - } -} - -func populateStream(s *Stream) []float64 { - a := make([]float64, 0, 1e5+100) - for i := 0; i < cap(a); i++ { - v := rand.NormFloat64() - // Add 5% asymmetric outliers. 
- if i%20 == 0 { - v = v*v + 1 - } - s.Insert(v) - a = append(a, v) - } - return a -} - -func TestTargetedQuery(t *testing.T) { - rand.Seed(42) - s := NewTargeted(Targets) - a := populateStream(s) - verifyPercsWithAbsoluteEpsilon(t, a, s) -} - -func TestTargetedQuerySmallSampleSize(t *testing.T) { - rand.Seed(42) - s := NewTargeted(TargetsSmallEpsilon) - a := []float64{1, 2, 3, 4, 5} - for _, v := range a { - s.Insert(v) - } - verifyPercsWithAbsoluteEpsilon(t, a, s) - // If not yet flushed, results should be precise: - if !s.flushed() { - for φ, want := range map[float64]float64{ - 0.01: 1, - 0.10: 1, - 0.50: 3, - 0.90: 5, - 0.99: 5, - } { - if got := s.Query(φ); got != want { - t.Errorf("want %f for φ=%f, got %f", want, φ, got) - } - } - } -} - -func TestLowBiasedQuery(t *testing.T) { - rand.Seed(42) - s := NewLowBiased(RelativeEpsilon) - a := populateStream(s) - verifyLowPercsWithRelativeEpsilon(t, a, s) -} - -func TestHighBiasedQuery(t *testing.T) { - rand.Seed(42) - s := NewHighBiased(RelativeEpsilon) - a := populateStream(s) - verifyHighPercsWithRelativeEpsilon(t, a, s) -} - -// BrokenTestTargetedMerge is broken, see Merge doc comment. -func BrokenTestTargetedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewTargeted(Targets) - s2 := NewTargeted(Targets) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyPercsWithAbsoluteEpsilon(t, a, s1) -} - -// BrokenTestLowBiasedMerge is broken, see Merge doc comment. -func BrokenTestLowBiasedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewLowBiased(RelativeEpsilon) - s2 := NewLowBiased(RelativeEpsilon) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyLowPercsWithRelativeEpsilon(t, a, s2) -} - -// BrokenTestHighBiasedMerge is broken, see Merge doc comment. 
-func BrokenTestHighBiasedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewHighBiased(RelativeEpsilon) - s2 := NewHighBiased(RelativeEpsilon) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyHighPercsWithRelativeEpsilon(t, a, s2) -} - -func TestUncompressed(t *testing.T) { - q := NewTargeted(Targets) - for i := 100; i > 0; i-- { - q.Insert(float64(i)) - } - if g := q.Count(); g != 100 { - t.Errorf("want count 100, got %d", g) - } - // Before compression, Query should have 100% accuracy. - for quantile := range Targets { - w := quantile * 100 - if g := q.Query(quantile); g != w { - t.Errorf("want %f, got %f", w, g) - } - } -} - -func TestUncompressedSamples(t *testing.T) { - q := NewTargeted(map[float64]float64{0.99: 0.001}) - for i := 1; i <= 100; i++ { - q.Insert(float64(i)) - } - if g := q.Samples().Len(); g != 100 { - t.Errorf("want count 100, got %d", g) - } -} - -func TestUncompressedOne(t *testing.T) { - q := NewTargeted(map[float64]float64{0.99: 0.01}) - q.Insert(3.14) - if g := q.Query(0.90); g != 3.14 { - t.Error("want PI, got", g) - } -} - -func TestDefaults(t *testing.T) { - if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { - t.Errorf("want 0, got %f", g) - } -} diff --git a/vendor/github.com/beorn7/perks/topk/topk.go b/vendor/github.com/beorn7/perks/topk/topk.go deleted file mode 100644 index 5ac3d990..00000000 --- a/vendor/github.com/beorn7/perks/topk/topk.go +++ /dev/null @@ -1,90 +0,0 @@ -package topk - -import ( - "sort" -) - -// http://www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf - -type Element struct { - Value string - Count int -} - -type Samples []*Element - -func (sm Samples) Len() int { - return len(sm) -} - -func (sm Samples) Less(i, j int) bool { - return sm[i].Count < sm[j].Count -} - -func (sm Samples) Swap(i, j int) { - sm[i], sm[j] = sm[j], sm[i] -} - -type Stream struct { - k int - mon map[string]*Element - - // the minimum Element - min *Element -} 
- -func New(k int) *Stream { - s := new(Stream) - s.k = k - s.mon = make(map[string]*Element) - s.min = &Element{} - - // Track k+1 so that less frequenet items contended for that spot, - // resulting in k being more accurate. - return s -} - -func (s *Stream) Insert(x string) { - s.insert(&Element{x, 1}) -} - -func (s *Stream) Merge(sm Samples) { - for _, e := range sm { - s.insert(e) - } -} - -func (s *Stream) insert(in *Element) { - e := s.mon[in.Value] - if e != nil { - e.Count++ - } else { - if len(s.mon) < s.k+1 { - e = &Element{in.Value, in.Count} - s.mon[in.Value] = e - } else { - e = s.min - delete(s.mon, e.Value) - e.Value = in.Value - e.Count += in.Count - s.min = e - } - } - if e.Count < s.min.Count { - s.min = e - } -} - -func (s *Stream) Query() Samples { - var sm Samples - for _, e := range s.mon { - sm = append(sm, e) - } - sort.Sort(sort.Reverse(sm)) - - if len(sm) < s.k { - return sm - } - - return sm[:s.k] -} diff --git a/vendor/github.com/beorn7/perks/topk/topk_test.go b/vendor/github.com/beorn7/perks/topk/topk_test.go deleted file mode 100644 index c24f0f72..00000000 --- a/vendor/github.com/beorn7/perks/topk/topk_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package topk - -import ( - "fmt" - "math/rand" - "sort" - "testing" -) - -func TestTopK(t *testing.T) { - stream := New(10) - ss := []*Stream{New(10), New(10), New(10)} - m := make(map[string]int) - for _, s := range ss { - for i := 0; i < 1e6; i++ { - v := fmt.Sprintf("%x", int8(rand.ExpFloat64())) - s.Insert(v) - m[v]++ - } - stream.Merge(s.Query()) - } - - var sm Samples - for x, s := range m { - sm = append(sm, &Element{x, s}) - } - sort.Sort(sort.Reverse(sm)) - - g := stream.Query() - if len(g) != 10 { - t.Fatalf("got %d, want 10", len(g)) - } - for i, e := range g { - if sm[i].Value != e.Value { - t.Errorf("at %d: want %q, got %q", i, sm[i].Value, e.Value) - } - } -} - -func TestQuery(t *testing.T) { - queryTests := []struct { - value string - expected int - }{ - {"a", 1}, - {"b", 2}, - 
{"c", 2}, - } - - stream := New(2) - for _, tt := range queryTests { - stream.Insert(tt.value) - if n := len(stream.Query()); n != tt.expected { - t.Errorf("want %d, got %d", tt.expected, n) - } - } -} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 00000000..c516ea88 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 00000000..2fd8693c --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 00000000..49f67608 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 b/vendor/github.com/cespare/xxhash/v2/go.sum similarity index 100% rename from vendor/github.com/eapache/go-xerial-snappy/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709-1 rename to vendor/github.com/cespare/xxhash/v2/go.sum diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 00000000..db0b35fb --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. 
Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. 
+func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 00000000..ad14b807 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 
computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 00000000..d580e32a --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. 
+ MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 00000000..4a5a8216 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 00000000..fc9bea7a --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. 
+func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 00000000..53bf76ef --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore deleted file mode 100644 index 00268614..00000000 --- a/vendor/github.com/davecgh/go-spew/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml deleted file mode 100644 index 1f4cbf54..00000000 --- a/vendor/github.com/davecgh/go-spew/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -go_import_path: github.com/davecgh/go-spew -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - tip -sudo: false -install: - - go get -v github.com/alecthomas/gometalinter - - gometalinter --install -script: - - export PATH=$PATH:$HOME/gopath/bin - - export GORACE="halt_on_error=1" - - test -z "$(gometalinter --disable-all - --enable=gofmt - --enable=golint - --enable=vet - --enable=gosimple - --enable=unconvert - --deadline=4m ./spew | tee /dev/stderr)" - - go test -v -race -tags safe ./spew - - go test -v -race -tags testcgo ./spew -covermode=atomic -coverprofile=profile.cov -after_success: - - go get -v github.com/mattn/goveralls - - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md deleted file mode 100644 index f6ed02c3..00000000 --- a/vendor/github.com/davecgh/go-spew/README.md +++ /dev/null @@ -1,201 +0,0 @@ -go-spew -======= - -[![Build 
Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) -[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) -[![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master) - -Go-spew implements a deep pretty printer for Go data structures to aid in -debugging. A comprehensive suite of tests with 100% test coverage is provided -to ensure proper functionality. See `test_coverage.txt` for the gocov coverage -report. Go-spew is licensed under the liberal ISC license, so it may be used in -open source or commercial projects. - -If you're interested in reading about how this package came to life and some -of the challenges involved in providing a deep pretty printer, there is a blog -post about it -[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). - -## Documentation - -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew) - -Full `go doc` style documentation for the project can be viewed online without -installing this package by using the excellent GoDoc site here: -http://godoc.org/github.com/davecgh/go-spew/spew - -You can also view the documentation locally once the package is installed with -the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/davecgh/go-spew/spew - -## Installation - -```bash -$ go get -u github.com/davecgh/go-spew/spew -``` - -## Quick Start - -Add this import line to the file you're working in: - -```Go -import "github.com/davecgh/go-spew/spew" -``` - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - -```Go -spew.Dump(myVar1, myVar2, ...) -spew.Fdump(someWriter, myVar1, myVar2, ...) -str := spew.Sdump(myVar1, myVar2, ...) 
-``` - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most -compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types -and pointer addresses): - -```Go -spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) -spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) -spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -``` - -## Debugging a Web Application Example - -Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. - -```Go -package main - -import ( - "fmt" - "html" - "net/http" - - "github.com/davecgh/go-spew/spew" -) - -func handler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/html") - fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) - fmt.Fprintf(w, "") -} - -func main() { - http.HandleFunc("/", handler) - http.ListenAndServe(":8080", nil) -} -``` - -## Sample Dump Output - -``` -(main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) { - (string) "one": (bool) true - } -} -([]uint8) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| -} -``` - -## Sample Formatter Output - -Double pointer to a uint8: -``` - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 -``` - -Pointer to circular struct with a uint8 field and a pointer to itself: -``` - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} -``` - -## Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available via the -spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -``` -* Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - -* MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - -* DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - -* DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. This option - relies on access to the unsafe package, so it will not have any effect when - running in environments without access to the unsafe package such as Google - App Engine or with the "safe" build tag specified. - Pointer method invocation is enabled by default. - -* DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. 
This is useful when diffing data structures in tests. - -* DisableCapacities - DisableCapacities specifies whether to disable the printing of capacities - for arrays, slices, maps and channels. This is useful when diffing data - structures in tests. - -* ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - -* SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are supported, - with other types sorted according to the reflect.Value.String() output - which guarantees display stability. Natural map order is used by - default. - -* SpewKeys - SpewKeys specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only considered - if SortKeys is true. - -``` - -## Unsafe Package Dependency - -This package relies on the unsafe package to perform some of the more advanced -features, however it also supports a "limited" mode which allows it to work in -environments where the unsafe package is not available. By default, it will -operate in this mode on Google App Engine and when compiled with GopherJS. The -"safe" build tag may also be specified to force the package to build without -using the unsafe package. - -## License - -Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh deleted file mode 100644 index 9579497e..00000000 --- a/vendor/github.com/davecgh/go-spew/cov_report.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# This script uses gocov to generate a test coverage report. 
-# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. - -# Check for gocov. -if ! type gocov >/dev/null 2>&1; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi - -# Only run the cgo tests if gcc is installed. -if type gcc >/dev/null 2>&1; then - (cd spew && gocov test -tags testcgo | gocov report) -else - (cd spew && gocov test | gocov report) -fi diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go deleted file mode 100644 index 0f5ce47d..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/common_test.go +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// custom type to test Stinger interface on non-pointer receiver. -type stringer string - -// String implements the Stringer interface for testing invocation of custom -// stringers on types with non-pointer receivers. 
-func (s stringer) String() string { - return "stringer " + string(s) -} - -// custom type to test Stinger interface on pointer receiver. -type pstringer string - -// String implements the Stringer interface for testing invocation of custom -// stringers on types with only pointer receivers. -func (s *pstringer) String() string { - return "stringer " + string(*s) -} - -// xref1 and xref2 are cross referencing structs for testing circular reference -// detection. -type xref1 struct { - ps2 *xref2 -} -type xref2 struct { - ps1 *xref1 -} - -// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular -// reference for testing detection. -type indirCir1 struct { - ps2 *indirCir2 -} -type indirCir2 struct { - ps3 *indirCir3 -} -type indirCir3 struct { - ps1 *indirCir1 -} - -// embed is used to test embedded structures. -type embed struct { - a string -} - -// embedwrap is used to test embedded structures. -type embedwrap struct { - *embed - e *embed -} - -// panicer is used to intentionally cause a panic for testing spew properly -// handles them -type panicer int - -func (p panicer) String() string { - panic("test panic") -} - -// customError is used to test custom error interface invocation. -type customError int - -func (e customError) Error() string { - return fmt.Sprintf("error: %d", int(e)) -} - -// stringizeWants converts a slice of wanted test output into a format suitable -// for a test error message. -func stringizeWants(wants []string) string { - s := "" - for i, want := range wants { - if i > 0 { - s += fmt.Sprintf("want%d: %s", i+1, want) - } else { - s += "want: " + want - } - } - return s -} - -// testFailed returns whether or not a test failed by checking if the result -// of the test is in the slice of wanted strings. 
-func testFailed(result string, wants []string) bool { - for _, want := range wants { - if result == want { - return false - } - } - return true -} - -type sortableStruct struct { - x int -} - -func (ss sortableStruct) String() string { - return fmt.Sprintf("ss.%d", ss.x) -} - -type unsortableStruct struct { - x int -} - -type sortTestCase struct { - input []reflect.Value - expected []reflect.Value -} - -func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { - getInterfaces := func(values []reflect.Value) []interface{} { - interfaces := []interface{}{} - for _, v := range values { - interfaces = append(interfaces, v.Interface()) - } - return interfaces - } - - for _, test := range tests { - spew.SortValues(test.input, cs) - // reflect.DeepEqual cannot really make sense of reflect.Value, - // probably because of all the pointer tricks. For instance, - // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} - // instead. - input := getInterfaces(test.input) - expected := getInterfaces(test.expected) - if !reflect.DeepEqual(input, expected) { - t.Errorf("Sort mismatch:\n %v != %v", input, expected) - } - } -} - -// TestSortValues ensures the sort functionality for relect.Value based sorting -// works as intended. -func TestSortValues(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - embedA := v(embed{"a"}) - embedB := v(embed{"b"}) - embedC := v(embed{"c"}) - tests := []sortTestCase{ - // No values. - { - []reflect.Value{}, - []reflect.Value{}, - }, - // Bools. - { - []reflect.Value{v(false), v(true), v(false)}, - []reflect.Value{v(false), v(false), v(true)}, - }, - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Uints. - { - []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, - []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, - }, - // Floats. 
- { - []reflect.Value{v(2.0), v(1.0), v(3.0)}, - []reflect.Value{v(1.0), v(2.0), v(3.0)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // Array - { - []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, - []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, - }, - // Uintptrs. - { - []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, - []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, - }, - // SortableStructs. - { - // Note: not sorted - DisableMethods is set. - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - // Note: not sorted - SpewKeys is false. - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - }, - // Invalid. - { - []reflect.Value{embedB, embedA, embedC}, - []reflect.Value{embedB, embedA, embedC}, - }, - } - cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} - helpTestSortValues(tests, &cs, t) -} - -// TestSortValuesWithMethods ensures the sort functionality for relect.Value -// based sorting works as intended when using string methods. -func TestSortValuesWithMethods(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - tests := []sortTestCase{ - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // SortableStructs. - { - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - // Note: not sorted - SpewKeys is false. 
- []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - }, - } - cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} - helpTestSortValues(tests, &cs, t) -} - -// TestSortValuesWithSpew ensures the sort functionality for relect.Value -// based sorting works as intended when using spew to stringify keys. -func TestSortValuesWithSpew(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - tests := []sortTestCase{ - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // SortableStructs. - { - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, - }, - } - cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} - helpTestSortValues(tests, &cs, t) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go deleted file mode 100644 index 4a31a2ee..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. - -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Array containing bytes -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Slice containing bytes -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer 
interface -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// dumpTest is used to describe a test to be performed against the Dump method. -type dumpTest struct { - in interface{} - wants []string -} - -// dumpTests houses all of the tests to be performed against the Dump method. -var dumpTests = make([]dumpTest, 0) - -// addDumpTest is a helper method to append the passed input and desired result -// to dumpTests -func addDumpTest(in interface{}, wants ...string) { - test := dumpTest{in, wants} - dumpTests = append(dumpTests, test) -} - -func addIntDumpTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max int16. - v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max int64. 
- v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max int. - v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addUintDumpTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max uint32. - v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max uint64. 
- v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max uint. - v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addBoolDumpTests() { - // Boolean true. - v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFloatDumpTests() { - // Standard float32. - v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard float64. 
- v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addComplexDumpTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard complex128. - v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addArrayDumpTests() { - // Array containing standard ints. - v := [3]int{1, 2, 3} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + - vt + ") 2,\n (" + vt + ") 3\n}" - addDumpTest(v, "([3]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[3]"+vt+")()\n") - - // Array containing type with custom formatter on pointer receiver only. 
- v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := [3]pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + - ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + - ") (len=" + v2i2Len + ") " + "stringer 3\n}" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + - v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + - ") " + "\"3\"\n}" - } - addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") - addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") - addDumpTest(nv2, "(*[3]"+v2t+")()\n") - - // Array containing interfaces. - v3i0 := "one" - v3 := [3]interface{}{v3i0, int(2), uint(3)} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Array containing bytes. 
- v4 := [34]byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[34]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[34]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addSliceDumpTests() { - // Slice containing standard float32 values. - v := []float32{3.14, 6.28, 12.56} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + - vt + ") 6.28,\n (" + vt + ") 12.56\n}" - addDumpTest(v, "([]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[]"+vt+")()\n") - - // Slice containing type with custom formatter on pointer receiver only. 
- v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := []pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + - v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + - ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + - "stringer 3\n}" - addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*[]"+v2t+")()\n") - - // Slice containing interfaces. - v3i0 := "one" - v3 := []interface{}{v3i0, int(2), uint(3), nil} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3,\n (" + v3t5 + ") \n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Slice containing bytes. 
- v4 := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Nil slice. - v5 := []int(nil) - nv5 := (*[]int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "[]int" - v5s := "" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addStringDumpTests() { - // Standard string. - v := "test" - vLen := fmt.Sprintf("%d", len(v)) - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "(len=" + vLen + ") \"test\"" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addInterfaceDumpTests() { - // Nil interface. 
- var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Sub-interface. - v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addMapDumpTests() { - // Map with string keys and int vals. - k := "one" - kk := "two" - m := map[string]int{k: 1, kk: 2} - klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up - kkLen := fmt.Sprintf("%d", len(kk)) - mLen := fmt.Sprintf("%d", len(m)) - nilMap := map[string]int(nil) - nm := (*map[string]int)(nil) - pm := &m - mAddr := fmt.Sprintf("%p", pm) - pmAddr := fmt.Sprintf("%p", &pm) - mt := "map[string]int" - mt1 := "string" - mt2 := "int" - ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + - "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + - ") \"two\": (" + mt2 + ") 2\n}" - ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + - "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + - ") \"one\": (" + mt2 + ") 1\n}" - addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") - addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", - "(*"+mt+")("+mAddr+")("+ms2+")\n") - addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", - "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") - addDumpTest(nm, "(*"+mt+")()\n") - addDumpTest(nilMap, "("+mt+") \n") - - // Map with custom formatter type on pointer receiver only keys and vals. 
- k2 := pstringer("one") - v2 := pstringer("1") - m2 := map[pstringer]pstringer{k2: v2} - k2Len := fmt.Sprintf("%d", len(k2)) - v2Len := fmt.Sprintf("%d", len(v2)) - m2Len := fmt.Sprintf("%d", len(m2)) - nilMap2 := map[pstringer]pstringer(nil) - nm2 := (*map[pstringer]pstringer)(nil) - pm2 := &m2 - m2Addr := fmt.Sprintf("%p", pm2) - pm2Addr := fmt.Sprintf("%p", &pm2) - m2t := "map[spew_test.pstringer]spew_test.pstringer" - m2t1 := "spew_test.pstringer" - m2t2 := "spew_test.pstringer" - m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + - "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" - if spew.UnsafeDisabled { - m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + - ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + - ") \"1\"\n}" - } - addDumpTest(m2, "("+m2t+") "+m2s+"\n") - addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") - addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") - addDumpTest(nm2, "(*"+m2t+")()\n") - addDumpTest(nilMap2, "("+m2t+") \n") - - // Map with interface keys and values. - k3 := "one" - k3Len := fmt.Sprintf("%d", len(k3)) - m3 := map[interface{}]interface{}{k3: 1} - m3Len := fmt.Sprintf("%d", len(m3)) - nilMap3 := map[interface{}]interface{}(nil) - nm3 := (*map[interface{}]interface{})(nil) - pm3 := &m3 - m3Addr := fmt.Sprintf("%p", pm3) - pm3Addr := fmt.Sprintf("%p", &pm3) - m3t := "map[interface {}]interface {}" - m3t1 := "string" - m3t2 := "int" - m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + - "\"one\": (" + m3t2 + ") 1\n}" - addDumpTest(m3, "("+m3t+") "+m3s+"\n") - addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") - addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") - addDumpTest(nm3, "(*"+m3t+")()\n") - addDumpTest(nilMap3, "("+m3t+") \n") - - // Map with nil interface value. 
- k4 := "nil" - k4Len := fmt.Sprintf("%d", len(k4)) - m4 := map[string]interface{}{k4: nil} - m4Len := fmt.Sprintf("%d", len(m4)) - nilMap4 := map[string]interface{}(nil) - nm4 := (*map[string]interface{})(nil) - pm4 := &m4 - m4Addr := fmt.Sprintf("%p", pm4) - pm4Addr := fmt.Sprintf("%p", &pm4) - m4t := "map[string]interface {}" - m4t1 := "string" - m4t2 := "interface {}" - m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + - " \"nil\": (" + m4t2 + ") \n}" - addDumpTest(m4, "("+m4t+") "+m4s+"\n") - addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") - addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") - addDumpTest(nm4, "(*"+m4t+")()\n") - addDumpTest(nilMap4, "("+m4t+") \n") -} - -func addStructDumpTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Struct that contains another struct. 
- type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + - v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. - type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + - ") (len=5) stringer test2\n}" - v3sp := v3s - if spew.UnsafeDisabled { - v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) \"test2\"\n}" - v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) stringer test2\n}" - } - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Struct that contains embedded struct and field to same struct. 
- e := embed{"embedstr"} - eLen := fmt.Sprintf("%d", len("embedstr")) - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + - ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + - ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + - " \"embedstr\"\n })\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addUintptrDumpTests() { - // Null pointer. - v := uintptr(0) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - nv2 := (*uintptr)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addUnsafePointerDumpTests() { - // Null pointer. - v := unsafe.Pointer(nil) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Address of real variable. 
- i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addChanDumpTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFuncDumpTests() { - // Function with no params and no returns. - v := addIntDumpTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Function with param and no returns. 
- v2 := TestDump - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") -} - -func addCircularDumpTests() { - // Struct that is circular through self referencing. - type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + - vAddr + ")()\n })\n}" - vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") - - // Structs that are circular through cross referencing. 
- v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + - ")()\n })\n })\n}" - v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")()\n })\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") - - // Structs that are indirectly circular. - v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + - ")()\n })\n })\n })\n}" - v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")()\n })\n })\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") -} - -func addPanicDumpTests() { - // Type that panics in its Stringer interface. 
- v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addErrorDumpTests() { - // Type that has a custom Error interface. - v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -// TestDump executes all of the tests described by dumpTests. -func TestDump(t *testing.T) { - // Setup tests. - addIntDumpTests() - addUintDumpTests() - addBoolDumpTests() - addFloatDumpTests() - addComplexDumpTests() - addArrayDumpTests() - addSliceDumpTests() - addStringDumpTests() - addInterfaceDumpTests() - addMapDumpTests() - addStructDumpTests() - addUintptrDumpTests() - addUnsafePointerDumpTests() - addChanDumpTests() - addFuncDumpTests() - addCircularDumpTests() - addPanicDumpTests() - addErrorDumpTests() - addCgoDumpTests() - - t.Logf("Running %d tests", len(dumpTests)) - for i, test := range dumpTests { - buf := new(bytes.Buffer) - spew.Fdump(buf, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) - continue - } - } -} - -func TestDumpSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + - "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + - "(len=1) \"3\"\n" + - "}\n" - if s != expected { - 
t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "(map[spew_test.stringer]int) (len=3) {\n" + - "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if spew.UnsafeDisabled { - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + - "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + - "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + - "}\n" - } - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "(map[spew_test.customError]int) (len=3) {\n" + - "(spew_test.customError) error: 1: (int) 1,\n" + - "(spew_test.customError) error: 2: (int) 2,\n" + - "(spew_test.customError) error: 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go deleted file mode 100644 index 108baa55..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2013-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright 
notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when both cgo is supported and "-tags testcgo" is added to the go test -// command line. This means the cgo tests are only added (and hence run) when -// specifially requested. This configuration is used because spew itself -// does not require cgo to run even though it does handle certain cgo types -// specially. Rather than forcing all clients to require cgo and an external -// C compiler just to run the tests, this scheme makes them optional. -// +build cgo,testcgo - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew/testdata" -) - -func addCgoDumpTests() { - // C char pointer. - v := testdata.GetCgoCharPointer() - nv := testdata.GetCgoNullCharPointer() - pv := &v - vcAddr := fmt.Sprintf("%p", v) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "*testdata._Ctype_char" - vs := "116" - addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") - addDumpTest(nv, "("+vt+")()\n") - - // C char array. 
- v2, v2l, v2c := testdata.GetCgoCharArray() - v2Len := fmt.Sprintf("%d", v2l) - v2Cap := fmt.Sprintf("%d", v2c) - v2t := "[6]testdata._Ctype_char" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + - "{\n 00000000 74 65 73 74 32 00 " + - " |test2.|\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - - // C unsigned char array. - v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() - v3Len := fmt.Sprintf("%d", v3l) - v3Cap := fmt.Sprintf("%d", v3c) - v3t := "[6]testdata._Ctype_unsignedchar" - v3t2 := "[6]testdata._Ctype_uchar" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + - "{\n 00000000 74 65 73 74 33 00 " + - " |test3.|\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") - - // C signed char array. - v4, v4l, v4c := testdata.GetCgoSignedCharArray() - v4Len := fmt.Sprintf("%d", v4l) - v4Cap := fmt.Sprintf("%d", v4c) - v4t := "[6]testdata._Ctype_schar" - v4t2 := "testdata._Ctype_schar" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + - ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + - ") 0\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - - // C uint8_t array. - v5, v5l, v5c := testdata.GetCgoUint8tArray() - v5Len := fmt.Sprintf("%d", v5l) - v5Cap := fmt.Sprintf("%d", v5c) - v5t := "[6]testdata._Ctype_uint8_t" - v5t2 := "[6]testdata._Ctype_uchar" - v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + - "{\n 00000000 74 65 73 74 35 00 " + - " |test5.|\n}" - addDumpTest(v5, "("+v5t+") "+v5s+"\n", "("+v5t2+") "+v5s+"\n") - - // C typedefed unsigned char array. 
- v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() - v6Len := fmt.Sprintf("%d", v6l) - v6Cap := fmt.Sprintf("%d", v6c) - v6t := "[6]testdata._Ctype_custom_uchar_t" - v6t2 := "[6]testdata._Ctype_uchar" - v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + - "{\n 00000000 74 65 73 74 36 00 " + - " |test6.|\n}" - addDumpTest(v6, "("+v6t+") "+v6s+"\n", "("+v6t2+") "+v6s+"\n") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go deleted file mode 100644 index 52a0971f..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either cgo is not supported or "-tags testcgo" is not added to the go -// test command line. This file intentionally does not setup any cgo tests in -// this scenario. -// +build !cgo !testcgo - -package spew_test - -func addCgoDumpTests() { - // Don't add any tests for cgo since this file is only compiled when - // there should not be any cgo tests. 
-} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go deleted file mode 100644 index c6ec8c6d..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/example_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew" -) - -type Flag int - -const ( - flagOne Flag = iota - flagTwo -) - -var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", -} - -func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) -} - -type Bar struct { - data uintptr -} - -type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} -} - -// This example demonstrates how to use Dump to dump variables to stdout. 
-func ExampleDump() { - // The following package level declarations are assumed for this example: - /* - type Flag int - - const ( - flagOne Flag = iota - flagTwo - ) - - var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", - } - - func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) - } - - type Bar struct { - data uintptr - } - - type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} - } - */ - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - f := Flag(5) - b := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - - // Dump! - spew.Dump(s1, f, b) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Flag) Unknown flag (5) - // ([]uint8) (len=34 cap=34) { - // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - // 00000020 31 32 |12| - // } - // -} - -// This example demonstrates how to use Printf to display a variable with a -// format string and inline formatting. -func ExamplePrintf() { - // Create a double pointer to a uint 8. - ui8 := uint8(5) - pui8 := &ui8 - ppui8 := &pui8 - - // Create a circular data type. - type circular struct { - ui8 uint8 - c *circular - } - c := circular{ui8: 1} - c.c = &c - - // Print! 
- spew.Printf("ppui8: %v\n", ppui8) - spew.Printf("circular: %v\n", c) - - // Output: - // ppui8: <**>5 - // circular: {1 <*>{1 <*>}} -} - -// This example demonstrates how to use a ConfigState. -func ExampleConfigState() { - // Modify the indent level of the ConfigState only. The global - // configuration is not modified. - scs := spew.ConfigState{Indent: "\t"} - - // Output using the ConfigState instance. - v := map[string]int{"one": 1} - scs.Printf("v: %v\n", v) - scs.Dump(v) - - // Output: - // v: map[one:1] - // (map[string]int) (len=1) { - // (string) (len=3) "one": (int) 1 - // } -} - -// This example demonstrates how to use ConfigState.Dump to dump variables to -// stdout -func ExampleConfigState_Dump() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances with different indentation. - scs := spew.ConfigState{Indent: "\t"} - scs2 := spew.ConfigState{Indent: " "} - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - - // Dump using the ConfigState instances. - scs.Dump(s1) - scs2.Dump(s1) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // -} - -// This example demonstrates how to use ConfigState.Printf to display a variable -// with a format string and inline formatting. -func ExampleConfigState_Printf() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances and modify the method handling of the - // first ConfigState only. 
- scs := spew.NewDefaultConfig() - scs2 := spew.NewDefaultConfig() - scs.DisableMethods = true - - // Alternatively - // scs := spew.ConfigState{Indent: " ", DisableMethods: true} - // scs2 := spew.ConfigState{Indent: " "} - - // This is of type Flag which implements a Stringer and has raw value 1. - f := flagTwo - - // Dump using the ConfigState instances. - scs.Printf("f: %v\n", f) - scs2.Printf("f: %v\n", f) - - // Output: - // f: 1 - // f: flagTwo -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go deleted file mode 100644 index 87ee9651..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/format_test.go +++ /dev/null @@ -1,1558 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. 
- -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -- Type that has a custom Error interface -- %x passthrough with uint -- %#x passthrough with uint -- %f passthrough with precision -- %f passthrough with width and precision -- %d passthrough with width -- %q passthrough with string -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// formatterTest is used to describe a test to be performed against NewFormatter. 
-type formatterTest struct { - format string - in interface{} - wants []string -} - -// formatterTests houses all of the tests to be performed against NewFormatter. -var formatterTests = make([]formatterTest, 0) - -// addFormatterTest is a helper method to append the passed input and desired -// result to formatterTests. -func addFormatterTest(format string, in interface{}, wants ...string) { - test := formatterTest{format, in, wants} - formatterTests = append(formatterTests, test) -} - -func addIntFormatterTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max int16. 
- v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max int64. 
- v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max int. - v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") -} - -func addUintFormatterTests() { - // Max uint8. 
- v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max uint32. 
- v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max uint64. - v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max uint. 
- v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") -} - -func addBoolFormatterTests() { - // Boolean true. - v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Boolean false. 
- v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFloatFormatterTests() { - // Standard float32. - v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard float64. 
- v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addComplexFormatterTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard complex128. 
- v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addArrayFormatterTests() { - // Array containing standard ints. 
- v := [3]int{1, 2, 3} - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[3]int" - vs := "[1 2 3]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Array containing type with custom formatter on pointer receiver only. - v2 := [3]pstringer{"1", "2", "3"} - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[3]spew_test.pstringer" - v2sp := "[stringer 1 stringer 2 stringer 3]" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "[1 2 3]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2sp) - addFormatterTest("%v", &pv2, "<**>"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) - addFormatterTest("%#+v", &pv2, 
"(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Array containing interfaces. - v3 := [3]interface{}{"one", int(2), uint(3)} - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "[one 2 3]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addSliceFormatterTests() { - // Slice containing standard float32 values. 
- v := []float32{3.14, 6.28, 12.56} - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[]float32" - vs := "[3.14 6.28 12.56]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Slice containing type with custom formatter on pointer receiver only. - v2 := []pstringer{"1", "2", "3"} - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[]spew_test.pstringer" - v2s := "[stringer 1 stringer 2 stringer 3]" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, 
"(*"+v2t+")"+"") - - // Slice containing interfaces. - v3 := []interface{}{"one", int(2), uint(3), nil} - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "[one 2 3 ]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + - ")]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Nil slice. 
- var v4 []int - nv4 := (*[]int)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]int" - v4s := "" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStringFormatterTests() { - // Standard string. - v := "test" - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "test" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addInterfaceFormatterTests() { - // Nil interface. 
- var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Sub-interface. - v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addMapFormatterTests() { - // Map with string keys and int vals. 
- v := map[string]int{"one": 1, "two": 2} - nilMap := map[string]int(nil) - nv := (*map[string]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "map[string]int" - vs := "map[one:1 two:2]" - vs2 := "map[two:2 one:1]" - addFormatterTest("%v", v, vs, vs2) - addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, - "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) - addFormatterTest("%#v", nilMap, "("+vt+")"+"") - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, - "(*"+vt+")("+vAddr+")"+vs2) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, - "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%#+v", nilMap, "("+vt+")"+"") - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Map with custom formatter type on pointer receiver only keys and vals. 
- v2 := map[pstringer]pstringer{"one": "1"} - nv2 := (*map[pstringer]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "map[spew_test.pstringer]spew_test.pstringer" - v2s := "map[stringer one:stringer 1]" - if spew.UnsafeDisabled { - v2s = "map[one:1]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Map with interface keys and values. 
- v3 := map[interface{}]interface{}{"one": 1} - nv3 := (*map[interface{}]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "map[interface {}]interface {}" - v3t1 := "string" - v3t2 := "int" - v3s := "map[one:1]" - v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Map with nil interface value - v4 := map[string]interface{}{"nil": nil} - nv4 := (*map[string]interface{})(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "map[string]interface {}" - v4t1 := "interface {}" - v4s := "map[nil:]" - v4s2 := "map[nil:(" + v4t1 + ")]" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", 
v4, "("+v4t+")"+v4s2) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStructFormatterTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{127 255}" - vs2 := "{a:127 b:255}" - vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs3) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs3) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Struct that contains another struct. 
- type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{{127 255} true}" - v2s2 := "{s1:{a:127 b:255} b:true}" - v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + - v2t5 + ")true}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s2) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. 
- type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{stringer test stringer test2}" - v3sp := v3s - v3s2 := "{s:stringer test S:stringer test2}" - v3s2p := v3s2 - v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" - v3s3p := v3s3 - if spew.UnsafeDisabled { - v3s = "{test test2}" - v3sp = "{test stringer test2}" - v3s2 = "{s:test S:test2}" - v3s2p = "{s:test S:stringer test2}" - v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" - v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" - } - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3sp) - addFormatterTest("%v", &pv3, "<**>"+v3sp) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s2) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Struct that contains embedded struct and field to same struct. 
- e := embed{"embedstr"} - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{<*>{embedstr} <*>{embedstr}}" - v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + - "){a:embedstr}}" - v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + - "){a:(" + v4t3 + ")embedstr}}" - v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + - ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s2) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addUintptrFormatterTests() { - // Null pointer. 
- v := uintptr(0) - nv := (*uintptr)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addUnsafePointerFormatterTests() { - // Null pointer. 
- v := unsafe.Pointer(nil) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addChanFormatterTests() { - // Nil channel. 
- var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFuncFormatterTests() { - // Function with no params and no returns. 
- v := addIntFormatterTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Function with param and no returns. - v2 := TestFormatter - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Function with multiple params and multiple returns. 
- var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addCircularFormatterTests() { - // Struct that is circular through self referencing. 
- type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{<*>{<*>}}" - vs2 := "{<*>}" - vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" - vs4 := "{c:<*>(" + vAddr + ")}" - vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" - vs6 := "{c:(*" + vt + ")}" - vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + - ")}}" - vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs2) - addFormatterTest("%+v", v, vs3) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) - addFormatterTest("%#v", v, "("+vt+")"+vs5) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) - addFormatterTest("%#+v", v, "("+vt+")"+vs7) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) - - // Structs that are circular through cross referencing. 
- v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{<*>{<*>{<*>}}}" - v2s2 := "{<*>{<*>}}" - v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + - ts2Addr + ")}}}" - v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" - v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + - ")}}}" - v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" - v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + - ")}}}" - v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + ")}}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s2) - addFormatterTest("%v", &pv2, "<**>"+v2s2) - addFormatterTest("%+v", v2, v2s3) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) - - // Structs that are indirectly circular. 
- v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{<*>{<*>{<*>{<*>}}}}" - v3s2 := "{<*>{<*>{<*>}}}" - v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" - v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + ")}}}" - v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - "){ps2:(*" + v3t2 + ")}}}}" - v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - ")}}}" - v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + - ")(" + tic2Addr + ")}}}}" - v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s2) - addFormatterTest("%v", &pv3, "<**>"+v3s2) - addFormatterTest("%+v", v3, v3s3) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) - addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) -} - -func addPanicFormatterTests() { - // Type that panics in its Stringer interface. 
- v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addErrorFormatterTests() { - // Type that has a custom Error interface. - v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addPassthroughFormatterTests() { - // %x passthrough with uint. 
- v := uint(4294967295) - pv := &v - vAddr := fmt.Sprintf("%x", pv) - pvAddr := fmt.Sprintf("%x", &pv) - vs := "ffffffff" - addFormatterTest("%x", v, vs) - addFormatterTest("%x", pv, vAddr) - addFormatterTest("%x", &pv, pvAddr) - - // %#x passthrough with uint. - v2 := int(2147483647) - pv2 := &v2 - v2Addr := fmt.Sprintf("%#x", pv2) - pv2Addr := fmt.Sprintf("%#x", &pv2) - v2s := "0x7fffffff" - addFormatterTest("%#x", v2, v2s) - addFormatterTest("%#x", pv2, v2Addr) - addFormatterTest("%#x", &pv2, pv2Addr) - - // %f passthrough with precision. - addFormatterTest("%.2f", 3.1415, "3.14") - addFormatterTest("%.3f", 3.1415, "3.142") - addFormatterTest("%.4f", 3.1415, "3.1415") - - // %f passthrough with width and precision. - addFormatterTest("%5.2f", 3.1415, " 3.14") - addFormatterTest("%6.3f", 3.1415, " 3.142") - addFormatterTest("%7.4f", 3.1415, " 3.1415") - - // %d passthrough with width. - addFormatterTest("%3d", 127, "127") - addFormatterTest("%4d", 127, " 127") - addFormatterTest("%5d", 127, " 127") - - // %q passthrough with string. - addFormatterTest("%q", "test", "\"test\"") -} - -// TestFormatter executes all of the tests described by formatterTests. -func TestFormatter(t *testing.T) { - // Setup tests. 
- addIntFormatterTests() - addUintFormatterTests() - addBoolFormatterTests() - addFloatFormatterTests() - addComplexFormatterTests() - addArrayFormatterTests() - addSliceFormatterTests() - addStringFormatterTests() - addInterfaceFormatterTests() - addMapFormatterTests() - addStructFormatterTests() - addUintptrFormatterTests() - addUnsafePointerFormatterTests() - addChanFormatterTests() - addFuncFormatterTests() - addCircularFormatterTests() - addPanicFormatterTests() - addErrorFormatterTests() - addPassthroughFormatterTests() - - t.Logf("Running %d tests", len(formatterTests)) - for i, test := range formatterTests { - buf := new(bytes.Buffer) - spew.Fprintf(buf, test.format, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, - stringizeWants(test.wants)) - continue - } - } -} - -type testStruct struct { - x int -} - -func (ts testStruct) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -type testStructP struct { - x int -} - -func (ts *testStructP) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -func TestPrintSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "map[1:1 2:2 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if spew.UnsafeDisabled { - expected = "map[1:1 2:2 3:3]" - } - if s != expected { - t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[testStruct]int{{1}: 1, {3}: 3, {2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected 
{ - t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) - } - - if !spew.UnsafeDisabled { - s = cfg.Sprint(map[testStructP]int{{1}: 1, {3}: 3, {2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) - } - } - - s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "map[error: 1:1 error: 2:2 error: 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go deleted file mode 100644 index e312b4fa..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/internal_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -This test file is part of the spew package rather than than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" -) - -// dummyFmtState implements a fake fmt.State to use for testing invalid -// reflect.Value handling. 
This is necessary because the fmt package catches -// invalid values before invoking the formatter on them. -type dummyFmtState struct { - bytes.Buffer -} - -func (dfs *dummyFmtState) Flag(f int) bool { - return f == int('+') -} - -func (dfs *dummyFmtState) Precision() (int, bool) { - return 0, false -} - -func (dfs *dummyFmtState) Width() (int, bool) { - return 0, false -} - -// TestInvalidReflectValue ensures the dump and formatter code handles an -// invalid reflect value properly. This needs access to internal state since it -// should never happen in real code and therefore can't be tested via the public -// API. -func TestInvalidReflectValue(t *testing.T) { - i := 1 - - // Dump invalid reflect value. - v := new(reflect.Value) - buf := new(bytes.Buffer) - d := dumpState{w: buf, cs: &Config} - d.dump(*v) - s := buf.String() - want := "" - if s != want { - t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) - } - i++ - - // Formatter invalid reflect value. - buf2 := new(dummyFmtState) - f := formatState{value: *v, cs: &Config, fs: buf2} - f.format(*v) - s = buf2.String() - want = "" - if s != want { - t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) - } -} - -// SortValues makes the internal sortValues function available to the test -// package. -func SortValues(values []reflect.Value, cs *ConfigState) { - sortValues(values, cs) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go deleted file mode 100644 index 80dc2217..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2013-2016 Dave Collins - -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. 
- -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -/* -This test file is part of the spew package rather than than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" -) - -// changeKind uses unsafe to intentionally change the kind of a reflect.Value to -// the maximum kind value which does not exist. This is needed to test the -// fallback code which punts to the standard fmt library for new types that -// might get added to the language. -func changeKind(v *reflect.Value, readOnly bool) { - flags := flagField(v) - if readOnly { - *flags |= flagRO - } else { - *flags &^= flagRO - } - *flags |= flagKindMask -} - -// TestAddedReflectValue tests functionaly of the dump and formatter code which -// falls back to the standard fmt library for new types that might get added to -// the language. -func TestAddedReflectValue(t *testing.T) { - i := 1 - - // Dump using a reflect.Value that is exported. 
- v := reflect.ValueOf(int8(5)) - changeKind(&v, false) - buf := new(bytes.Buffer) - d := dumpState{w: buf, cs: &Config} - d.dump(v) - s := buf.String() - want := "(int8) 5" - if s != want { - t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want) - } - i++ - - // Dump using a reflect.Value that is not exported. - changeKind(&v, true) - buf.Reset() - d.dump(v) - s = buf.String() - want = "(int8) " - if s != want { - t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want) - } - i++ - - // Formatter using a reflect.Value that is exported. - changeKind(&v, false) - buf2 := new(dummyFmtState) - f := formatState{value: v, cs: &Config, fs: buf2} - f.format(v) - s = buf2.String() - want = "5" - if s != want { - t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want) - } - i++ - - // Formatter using a reflect.Value that is not exported. - changeKind(&v, true) - buf2.Reset() - f = formatState{value: v, cs: &Config, fs: buf2} - f.format(v) - s = buf2.String() - want = "" - if s != want { - t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want) - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go deleted file mode 100644 index b70466c6..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew_test.go +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// spewFunc is used to identify which public function of the spew package or -// ConfigState a test applies to. -type spewFunc int - -const ( - fCSFdump spewFunc = iota - fCSFprint - fCSFprintf - fCSFprintln - fCSPrint - fCSPrintln - fCSSdump - fCSSprint - fCSSprintf - fCSSprintln - fCSErrorf - fCSNewFormatter - fErrorf - fFprint - fFprintln - fPrint - fPrintln - fSdump - fSprint - fSprintf - fSprintln -) - -// Map of spewFunc values to names for pretty printing. -var spewFuncStrings = map[spewFunc]string{ - fCSFdump: "ConfigState.Fdump", - fCSFprint: "ConfigState.Fprint", - fCSFprintf: "ConfigState.Fprintf", - fCSFprintln: "ConfigState.Fprintln", - fCSSdump: "ConfigState.Sdump", - fCSPrint: "ConfigState.Print", - fCSPrintln: "ConfigState.Println", - fCSSprint: "ConfigState.Sprint", - fCSSprintf: "ConfigState.Sprintf", - fCSSprintln: "ConfigState.Sprintln", - fCSErrorf: "ConfigState.Errorf", - fCSNewFormatter: "ConfigState.NewFormatter", - fErrorf: "spew.Errorf", - fFprint: "spew.Fprint", - fFprintln: "spew.Fprintln", - fPrint: "spew.Print", - fPrintln: "spew.Println", - fSdump: "spew.Sdump", - fSprint: "spew.Sprint", - fSprintf: "spew.Sprintf", - fSprintln: "spew.Sprintln", -} - -func (f spewFunc) String() string { - if s, ok := spewFuncStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) -} - -// spewTest is used to describe a test to be performed against the public -// functions of the spew package or ConfigState. 
-type spewTest struct { - cs *spew.ConfigState - f spewFunc - format string - in interface{} - want string -} - -// spewTests houses the tests to be performed against the public functions of -// the spew package and ConfigState. -// -// These tests are only intended to ensure the public functions are exercised -// and are intentionally not exhaustive of types. The exhaustive type -// tests are handled in the dump and format tests. -var spewTests []spewTest - -// redirStdout is a helper function to return the standard output from f as a -// byte slice. -func redirStdout(f func()) ([]byte, error) { - tempFile, err := ioutil.TempFile("", "ss-test") - if err != nil { - return nil, err - } - fileName := tempFile.Name() - defer os.Remove(fileName) // Ignore error - - origStdout := os.Stdout - os.Stdout = tempFile - f() - os.Stdout = origStdout - tempFile.Close() - - return ioutil.ReadFile(fileName) -} - -func initSpewTests() { - // Config states with various settings. - scsDefault := spew.NewDefaultConfig() - scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} - scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} - scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} - scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} - scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} - scsNoCap := &spew.ConfigState{DisableCapacities: true} - - // Variables for tests on types which implement Stringer interface with and - // without a pointer receiver. - ts := stringer("test") - tps := pstringer("test") - - type ptrTester struct { - s *struct{} - } - tptr := &ptrTester{s: &struct{}{}} - - // depthTester is used to test max depth handling for structs, array, slices - // and maps. 
- type depthTester struct { - ic indirCir1 - arr [1]string - slice []string - m map[string]int - } - dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, - map[string]int{"one": 1}} - - // Variable for tests on types which implement error interface. - te := customError(10) - - spewTests = []spewTest{ - {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, - {scsDefault, fCSFprint, "", int16(32767), "32767"}, - {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, - {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, - {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, - {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, - {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, - {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, - {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, - {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, - {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, - {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, - {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, - {scsDefault, fFprint, "", float32(3.14), "3.14"}, - {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, - {scsDefault, fPrint, "", true, "true"}, - {scsDefault, fPrintln, "", false, "false\n"}, - {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, - {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, - {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, - {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, - {scsNoMethods, fCSFprint, "", ts, "test"}, - {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, - {scsNoMethods, fCSFprint, "", tps, "test"}, - {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, - {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, - {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, - {scsNoPmethods, fCSFprint, "", tps, "test"}, - 
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, - {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, - {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + - " ic: (spew_test.indirCir1) {\n \n },\n" + - " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + - " slice: ([]string) (len=1 cap=1) {\n \n },\n" + - " m: (map[string]int) (len=1) {\n \n }\n}\n"}, - {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, - {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + - "(len=4) (stringer test) \"test\"\n"}, - {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, - {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + - "(error: 10) 10\n"}, - {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, - {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, - {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, - {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, - } -} - -// TestSpew executes all of the tests described by spewTests. 
-func TestSpew(t *testing.T) { - initSpewTests() - - t.Logf("Running %d tests", len(spewTests)) - for i, test := range spewTests { - buf := new(bytes.Buffer) - switch test.f { - case fCSFdump: - test.cs.Fdump(buf, test.in) - - case fCSFprint: - test.cs.Fprint(buf, test.in) - - case fCSFprintf: - test.cs.Fprintf(buf, test.format, test.in) - - case fCSFprintln: - test.cs.Fprintln(buf, test.in) - - case fCSPrint: - b, err := redirStdout(func() { test.cs.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSPrintln: - b, err := redirStdout(func() { test.cs.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSSdump: - str := test.cs.Sdump(test.in) - buf.WriteString(str) - - case fCSSprint: - str := test.cs.Sprint(test.in) - buf.WriteString(str) - - case fCSSprintf: - str := test.cs.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fCSSprintln: - str := test.cs.Sprintln(test.in) - buf.WriteString(str) - - case fCSErrorf: - err := test.cs.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fCSNewFormatter: - fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) - - case fErrorf: - err := spew.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fFprint: - spew.Fprint(buf, test.in) - - case fFprintln: - spew.Fprintln(buf, test.in) - - case fPrint: - b, err := redirStdout(func() { spew.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fPrintln: - b, err := redirStdout(func() { spew.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fSdump: - str := spew.Sdump(test.in) - buf.WriteString(str) - - case fSprint: - str := spew.Sprint(test.in) - buf.WriteString(str) - - case fSprintf: - str := spew.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fSprintln: - 
str := spew.Sprintln(test.in) - buf.WriteString(str) - - default: - t.Errorf("%v #%d unrecognized function", test.f, i) - continue - } - s := buf.String() - if test.want != s { - t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) - continue - } - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go deleted file mode 100644 index 5c87dd45..00000000 --- a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when both cgo is supported and "-tags testcgo" is added to the go test -// command line. This code should really only be in the dumpcgo_test.go file, -// but unfortunately Go will not allow cgo in test files, so this is a -// workaround to allow cgo types to be tested. This configuration is used -// because spew itself does not require cgo to run even though it does handle -// certain cgo types specially. Rather than forcing all clients to require cgo -// and an external C compiler just to run the tests, this scheme makes them -// optional. 
-// +build cgo,testcgo - -package testdata - -/* -#include -typedef unsigned char custom_uchar_t; - -char *ncp = 0; -char *cp = "test"; -char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; -unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; -signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; -uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; -custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; -*/ -import "C" - -// GetCgoNullCharPointer returns a null char pointer via cgo. This is only -// used for tests. -func GetCgoNullCharPointer() interface{} { - return C.ncp -} - -// GetCgoCharPointer returns a char pointer via cgo. This is only used for -// tests. -func GetCgoCharPointer() interface{} { - return C.cp -} - -// GetCgoCharArray returns a char array via cgo and the array's len and cap. -// This is only used for tests. -func GetCgoCharArray() (interface{}, int, int) { - return C.ca, len(C.ca), cap(C.ca) -} - -// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the -// array's len and cap. This is only used for tests. -func GetCgoUnsignedCharArray() (interface{}, int, int) { - return C.uca, len(C.uca), cap(C.uca) -} - -// GetCgoSignedCharArray returns a signed char array via cgo and the array's len -// and cap. This is only used for tests. -func GetCgoSignedCharArray() (interface{}, int, int) { - return C.sca, len(C.sca), cap(C.sca) -} - -// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and -// cap. This is only used for tests. -func GetCgoUint8tArray() (interface{}, int, int) { - return C.ui8ta, len(C.ui8ta), cap(C.ui8ta) -} - -// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via -// cgo and the array's len and cap. This is only used for tests. 
-func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) { - return C.tuca, len(C.tuca), cap(C.tuca) -} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt deleted file mode 100644 index 2cd087a2..00000000 --- a/vendor/github.com/davecgh/go-spew/test_coverage.txt +++ /dev/null @@ -1,61 +0,0 @@ - -github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) -github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) -github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) -github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) -github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) -github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) -github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) -github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) -github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) -github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) -github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) -github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) -github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) -github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) -github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) -github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) -github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) -github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) -github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) -github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) -github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% 
(3/3) -github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) -github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) -github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) -github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go 
ConfigState.Dump 100.00% (1/1) -github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) -github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) -github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) - diff --git a/vendor/github.com/eapache/go-resiliency/.gitignore b/vendor/github.com/eapache/go-resiliency/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/eapache/go-resiliency/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/eapache/go-resiliency/.travis.yml b/vendor/github.com/eapache/go-resiliency/.travis.yml deleted file mode 100644 index 516290b6..00000000 --- a/vendor/github.com/eapache/go-resiliency/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.2 - - 1.6 - - 1.10 diff --git a/vendor/github.com/eapache/go-resiliency/CHANGELOG.md b/vendor/github.com/eapache/go-resiliency/CHANGELOG.md deleted file mode 100644 index c8054177..00000000 --- a/vendor/github.com/eapache/go-resiliency/CHANGELOG.md +++ /dev/null @@ -1,11 +0,0 @@ -# Changelog - -#### Version 1.1.0 (2018-03-26) - - - Improve documentation and fix some typos. - - Bump CI to ensure we support newer Golang versions. - - Add `IsEmpty()` method on `Semaphore`. - -#### Version 1.0.0 (2015-02-13) - -Initial release. 
diff --git a/vendor/github.com/eapache/go-resiliency/README.md b/vendor/github.com/eapache/go-resiliency/README.md deleted file mode 100644 index 0a0d7011..00000000 --- a/vendor/github.com/eapache/go-resiliency/README.md +++ /dev/null @@ -1,21 +0,0 @@ -go-resiliency -============= - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency?status.svg)](https://godoc.org/github.com/eapache/go-resiliency) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -Resiliency patterns for golang. -Based in part on [Hystrix](https://github.com/Netflix/Hystrix), -[Semian](https://github.com/Shopify/semian), and others. - -Currently implemented patterns include: -- circuit-breaker (in the `breaker` directory) -- semaphore (in the `semaphore` directory) -- deadline/timeout (in the `deadline` directory) -- batching (in the `batcher` directory) -- retriable (in the `retrier` directory) - -Follows semantic versioning using https://gopkg.in/ - import from -[`gopkg.in/eapache/go-resiliency.v1`](https://gopkg.in/eapache/go-resiliency.v1) -for guaranteed API stability. 
diff --git a/vendor/github.com/eapache/go-resiliency/batcher/README.md b/vendor/github.com/eapache/go-resiliency/batcher/README.md deleted file mode 100644 index 60e3b9f0..00000000 --- a/vendor/github.com/eapache/go-resiliency/batcher/README.md +++ /dev/null @@ -1,31 +0,0 @@ -batcher -======= - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/batcher?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/batcher) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The batching resiliency pattern for golang. - -Creating a batcher takes two parameters: -- the timeout to wait while collecting a batch -- the function to run once a batch has been collected - -You can also optionally set a prefilter to fail queries before they enter the -batch. - -```go -b := batcher.New(10*time.Millisecond, func(params []interface{}) error { - // do something with the batch of parameters - return nil -}) - -b.Prefilter(func(param interface{}) error { - // do some sort of sanity check on the parameter, and return an error if it fails - return nil -}) - -for i := 0; i < 10; i++ { - go b.Run(i) -} -``` diff --git a/vendor/github.com/eapache/go-resiliency/batcher/batcher.go b/vendor/github.com/eapache/go-resiliency/batcher/batcher.go deleted file mode 100644 index 2d1ccb46..00000000 --- a/vendor/github.com/eapache/go-resiliency/batcher/batcher.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package batcher implements the batching resiliency pattern for Go. 
-package batcher - -import ( - "sync" - "time" -) - -type work struct { - param interface{} - future chan error -} - -// Batcher implements the batching resiliency pattern -type Batcher struct { - timeout time.Duration - prefilter func(interface{}) error - - lock sync.Mutex - submit chan *work - doWork func([]interface{}) error -} - -// New constructs a new batcher that will batch all calls to Run that occur within -// `timeout` time before calling doWork just once for the entire batch. The doWork -// function must be safe to run concurrently with itself as this may occur, especially -// when the timeout is small. -func New(timeout time.Duration, doWork func([]interface{}) error) *Batcher { - return &Batcher{ - timeout: timeout, - doWork: doWork, - } -} - -// Run runs the work function with the given parameter, possibly -// including it in a batch with other calls to Run that occur within the -// specified timeout. It is safe to call Run concurrently on the same batcher. -func (b *Batcher) Run(param interface{}) error { - if b.prefilter != nil { - if err := b.prefilter(param); err != nil { - return err - } - } - - if b.timeout == 0 { - return b.doWork([]interface{}{param}) - } - - w := &work{ - param: param, - future: make(chan error, 1), - } - - b.submitWork(w) - - return <-w.future -} - -// Prefilter specifies an optional function that can be used to run initial checks on parameters -// passed to Run before being added to the batch. If the prefilter returns a non-nil error, -// that error is returned immediately from Run and the batcher is not invoked. A prefilter -// cannot safely be specified for a batcher if Run has already been invoked. The filter function -// specified must be concurrency-safe. 
-func (b *Batcher) Prefilter(filter func(interface{}) error) { - b.prefilter = filter -} - -func (b *Batcher) submitWork(w *work) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.submit == nil { - b.submit = make(chan *work, 4) - go b.batch() - } - - b.submit <- w -} - -func (b *Batcher) batch() { - var params []interface{} - var futures []chan error - input := b.submit - - go b.timer() - - for work := range input { - params = append(params, work.param) - futures = append(futures, work.future) - } - - ret := b.doWork(params) - - for _, future := range futures { - future <- ret - close(future) - } -} - -func (b *Batcher) timer() { - time.Sleep(b.timeout) - - b.lock.Lock() - defer b.lock.Unlock() - - close(b.submit) - b.submit = nil -} diff --git a/vendor/github.com/eapache/go-resiliency/batcher/batcher_test.go b/vendor/github.com/eapache/go-resiliency/batcher/batcher_test.go deleted file mode 100644 index f1b8d40e..00000000 --- a/vendor/github.com/eapache/go-resiliency/batcher/batcher_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package batcher - -import ( - "errors" - "sync" - "sync/atomic" - "testing" - "time" -) - -var errSomeError = errors.New("errSomeError") - -func returnsError(params []interface{}) error { - return errSomeError -} - -func returnsSuccess(params []interface{}) error { - return nil -} - -func TestBatcherSuccess(t *testing.T) { - b := New(10*time.Millisecond, returnsSuccess) - - wg := &sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - if err := b.Run(nil); err != nil { - t.Error(err) - } - wg.Done() - }() - } - wg.Wait() - - b = New(0, returnsSuccess) - for i := 0; i < 10; i++ { - if err := b.Run(nil); err != nil { - t.Error(err) - } - } -} - -func TestBatcherError(t *testing.T) { - b := New(10*time.Millisecond, returnsError) - - wg := &sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - if err := b.Run(nil); err != errSomeError { - t.Error(err) - } - wg.Done() - }() - } - wg.Wait() -} - -func 
TestBatcherPrefilter(t *testing.T) { - b := New(1*time.Millisecond, returnsSuccess) - - b.Prefilter(func(param interface{}) error { - if param == nil { - return errSomeError - } - return nil - }) - - if err := b.Run(nil); err != errSomeError { - t.Error(err) - } - - if err := b.Run(1); err != nil { - t.Error(err) - } -} - -func TestBatcherMultipleBatches(t *testing.T) { - var iters uint32 - - b := New(10*time.Millisecond, func(params []interface{}) error { - atomic.AddUint32(&iters, 1) - return nil - }) - - wg := &sync.WaitGroup{} - - for group := 0; group < 5; group++ { - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - if err := b.Run(nil); err != nil { - t.Error(err) - } - wg.Done() - }() - } - time.Sleep(15 * time.Millisecond) - } - - wg.Wait() - - if iters != 5 { - t.Error("Wrong number of iters:", iters) - } -} - -func ExampleBatcher() { - b := New(10*time.Millisecond, func(params []interface{}) error { - // do something with the batch of parameters - return nil - }) - - b.Prefilter(func(param interface{}) error { - // do some sort of sanity check on the parameter, and return an error if it fails - return nil - }) - - for i := 0; i < 10; i++ { - go b.Run(i) - } -} diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go deleted file mode 100644 index b41308db..00000000 --- a/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package breaker - -import ( - "errors" - "testing" - "time" -) - -var errSomeError = errors.New("errSomeError") - -func alwaysPanics() error { - panic("foo") -} - -func returnsError() error { - return errSomeError -} - -func returnsSuccess() error { - return nil -} - -func TestBreakerErrorExpiry(t *testing.T) { - breaker := New(2, 1, 1*time.Second) - - for i := 0; i < 3; i++ { - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - time.Sleep(1 * time.Second) - } - - for i := 0; 
i < 3; i++ { - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - time.Sleep(1 * time.Second) - } -} - -func TestBreakerPanicsCountAsErrors(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - func() { - defer func() { - val := recover() - if val.(string) != "foo" { - t.Error("incorrect panic") - } - }() - if err := breaker.Run(alwaysPanics); err != nil { - t.Error(err) - } - t.Error("shouldn't get here") - }() - } - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } -} - -func TestBreakerStateTransitions(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - } - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // one success works, but is not enough to fully close - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } - // error works, but re-opens immediately - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - // breaker is open - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // two successes is enough to close it for good - for i := 0; i < 2; i++ { - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } - } - // error works - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - // breaker is still closed - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } -} - -func TestBreakerAsyncStateTransitions(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i 
:= 0; i < 3; i++ { - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - } - - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Go(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // one success works, but is not enough to fully close - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } - // error works, but re-opens immediately - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // breaker is open - if err := breaker.Go(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // two successes is enough to close it for good - for i := 0; i < 2; i++ { - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // error works - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // breaker is still closed - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } -} - -func ExampleBreaker() { - breaker := New(3, 1, 5*time.Second) - - for { - result := breaker.Run(func() error { - // communicate with some external service and - // return an error if the communication failed - return nil - }) - - switch result { - case nil: - // success! 
- case ErrBreakerOpen: - // our function wasn't run because the breaker was open - default: - // some other error - } - } -} diff --git a/vendor/github.com/eapache/go-resiliency/deadline/README.md b/vendor/github.com/eapache/go-resiliency/deadline/README.md deleted file mode 100644 index ac97b460..00000000 --- a/vendor/github.com/eapache/go-resiliency/deadline/README.md +++ /dev/null @@ -1,27 +0,0 @@ -deadline -======== - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/deadline?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/deadline) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The deadline/timeout resiliency pattern for golang. - -Creating a deadline takes one parameter: how long to wait. - -```go -dl := deadline.New(1 * time.Second) - -err := dl.Run(func(stopper <-chan struct{}) error { - // do something potentially slow - // give up when the `stopper` channel is closed (indicating a time-out) - return nil -}) - -switch err { -case deadline.ErrTimedOut: - // execution took too long, oops -default: - // some other error -} -``` diff --git a/vendor/github.com/eapache/go-resiliency/deadline/deadline.go b/vendor/github.com/eapache/go-resiliency/deadline/deadline.go deleted file mode 100644 index 3a6dfb0e..00000000 --- a/vendor/github.com/eapache/go-resiliency/deadline/deadline.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package deadline implements the deadline (also known as "timeout") resiliency pattern for Go. -package deadline - -import ( - "errors" - "time" -) - -// ErrTimedOut is the error returned from Run when the deadline expires. -var ErrTimedOut = errors.New("timed out waiting for function to finish") - -// Deadline implements the deadline/timeout resiliency pattern. 
-type Deadline struct { - timeout time.Duration -} - -// New constructs a new Deadline with the given timeout. -func New(timeout time.Duration) *Deadline { - return &Deadline{ - timeout: timeout, - } -} - -// Run runs the given function, passing it a stopper channel. If the deadline passes before -// the function finishes executing, Run returns ErrTimeOut to the caller and closes the stopper -// channel so that the work function can attempt to exit gracefully. It does not (and cannot) -// simply kill the running function, so if it doesn't respect the stopper channel then it may -// keep running after the deadline passes. If the function finishes before the deadline, then -// the return value of the function is returned from Run. -func (d *Deadline) Run(work func(<-chan struct{}) error) error { - result := make(chan error) - stopper := make(chan struct{}) - - go func() { - result <- work(stopper) - }() - - select { - case ret := <-result: - return ret - case <-time.After(d.timeout): - close(stopper) - return ErrTimedOut - } -} diff --git a/vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go b/vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go deleted file mode 100644 index 6939f52e..00000000 --- a/vendor/github.com/eapache/go-resiliency/deadline/deadline_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package deadline - -import ( - "errors" - "testing" - "time" -) - -func takesFiveMillis(stopper <-chan struct{}) error { - time.Sleep(5 * time.Millisecond) - return nil -} - -func takesTwentyMillis(stopper <-chan struct{}) error { - time.Sleep(20 * time.Millisecond) - return nil -} - -func returnsError(stopper <-chan struct{}) error { - return errors.New("foo") -} - -func TestDeadline(t *testing.T) { - dl := New(10 * time.Millisecond) - - if err := dl.Run(takesFiveMillis); err != nil { - t.Error(err) - } - - if err := dl.Run(takesTwentyMillis); err != ErrTimedOut { - t.Error(err) - } - - if err := dl.Run(returnsError); err.Error() != "foo" { - 
t.Error(err) - } - - done := make(chan struct{}) - err := dl.Run(func(stopper <-chan struct{}) error { - <-stopper - close(done) - return nil - }) - if err != ErrTimedOut { - t.Error(err) - } - <-done -} - -func ExampleDeadline() { - dl := New(1 * time.Second) - - err := dl.Run(func(stopper <-chan struct{}) error { - // do something possibly slow - // check stopper function and give up if timed out - return nil - }) - - switch err { - case ErrTimedOut: - // execution took too long, oops - default: - // some other error - } -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/README.md b/vendor/github.com/eapache/go-resiliency/retrier/README.md deleted file mode 100644 index dd30af7a..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/README.md +++ /dev/null @@ -1,26 +0,0 @@ -retrier -======= - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/retrier?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/retrier) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The retriable resiliency pattern for golang. 
- -Creating a retrier takes two parameters: -- the times to back-off between retries (and implicitly the number of times to - retry) -- the classifier that determines which errors to retry - -```go -r := retrier.New(retrier.ConstantBackoff(3, 100*time.Millisecond), nil) - -err := r.Run(func() error { - // do some work - return nil -}) - -if err != nil { - // handle the case where the work failed three times -} -``` diff --git a/vendor/github.com/eapache/go-resiliency/retrier/backoffs.go b/vendor/github.com/eapache/go-resiliency/retrier/backoffs.go deleted file mode 100644 index faf6f8cf..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/backoffs.go +++ /dev/null @@ -1,24 +0,0 @@ -package retrier - -import "time" - -// ConstantBackoff generates a simple back-off strategy of retrying 'n' times, and waiting 'amount' time after each one. -func ConstantBackoff(n int, amount time.Duration) []time.Duration { - ret := make([]time.Duration, n) - for i := range ret { - ret[i] = amount - } - return ret -} - -// ExponentialBackoff generates a simple back-off strategy of retrying 'n' times, and doubling the amount of -// time waited after each one. 
-func ExponentialBackoff(n int, initialAmount time.Duration) []time.Duration { - ret := make([]time.Duration, n) - next := initialAmount - for i := range ret { - ret[i] = next - next *= 2 - } - return ret -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go b/vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go deleted file mode 100644 index 1168adfe..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/backoffs_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package retrier - -import ( - "testing" - "time" -) - -func TestConstantBackoff(t *testing.T) { - b := ConstantBackoff(1, 10*time.Millisecond) - if len(b) != 1 { - t.Error("incorrect length") - } - for i := range b { - if b[i] != 10*time.Millisecond { - t.Error("incorrect value at", i) - } - } - - b = ConstantBackoff(10, 250*time.Hour) - if len(b) != 10 { - t.Error("incorrect length") - } - for i := range b { - if b[i] != 250*time.Hour { - t.Error("incorrect value at", i) - } - } -} - -func TestExponentialBackoff(t *testing.T) { - b := ExponentialBackoff(1, 10*time.Millisecond) - if len(b) != 1 { - t.Error("incorrect length") - } - if b[0] != 10*time.Millisecond { - t.Error("incorrect value") - } - - b = ExponentialBackoff(4, 1*time.Minute) - if len(b) != 4 { - t.Error("incorrect length") - } - if b[0] != 1*time.Minute { - t.Error("incorrect value") - } - if b[1] != 2*time.Minute { - t.Error("incorrect value") - } - if b[2] != 4*time.Minute { - t.Error("incorrect value") - } - if b[3] != 8*time.Minute { - t.Error("incorrect value") - } -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/classifier.go b/vendor/github.com/eapache/go-resiliency/retrier/classifier.go deleted file mode 100644 index 7dd71c79..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/classifier.go +++ /dev/null @@ -1,66 +0,0 @@ -package retrier - -// Action is the type returned by a Classifier to indicate how the Retrier should proceed. 
-type Action int - -const ( - Succeed Action = iota // Succeed indicates the Retrier should treat this value as a success. - Fail // Fail indicates the Retrier should treat this value as a hard failure and not retry. - Retry // Retry indicates the Retrier should treat this value as a soft failure and retry. -) - -// Classifier is the interface implemented by anything that can classify Errors for a Retrier. -type Classifier interface { - Classify(error) Action -} - -// DefaultClassifier classifies errors in the simplest way possible. If -// the error is nil, it returns Succeed, otherwise it returns Retry. -type DefaultClassifier struct{} - -// Classify implements the Classifier interface. -func (c DefaultClassifier) Classify(err error) Action { - if err == nil { - return Succeed - } - - return Retry -} - -// WhitelistClassifier classifies errors based on a whitelist. If the error is nil, it -// returns Succeed; if the error is in the whitelist, it returns Retry; otherwise, it returns Fail. -type WhitelistClassifier []error - -// Classify implements the Classifier interface. -func (list WhitelistClassifier) Classify(err error) Action { - if err == nil { - return Succeed - } - - for _, pass := range list { - if err == pass { - return Retry - } - } - - return Fail -} - -// BlacklistClassifier classifies errors based on a blacklist. If the error is nil, it -// returns Succeed; if the error is in the blacklist, it returns Fail; otherwise, it returns Retry. -type BlacklistClassifier []error - -// Classify implements the Classifier interface. 
-func (list BlacklistClassifier) Classify(err error) Action { - if err == nil { - return Succeed - } - - for _, pass := range list { - if err == pass { - return Fail - } - } - - return Retry -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go b/vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go deleted file mode 100644 index 953102fb..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/classifier_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package retrier - -import ( - "errors" - "testing" -) - -var ( - errFoo = errors.New("FOO") - errBar = errors.New("BAR") - errBaz = errors.New("BAZ") -) - -func TestDefaultClassifier(t *testing.T) { - c := DefaultClassifier{} - - if c.Classify(nil) != Succeed { - t.Error("default misclassified nil") - } - - if c.Classify(errFoo) != Retry { - t.Error("default misclassified foo") - } - if c.Classify(errBar) != Retry { - t.Error("default misclassified bar") - } - if c.Classify(errBaz) != Retry { - t.Error("default misclassified baz") - } -} - -func TestWhitelistClassifier(t *testing.T) { - c := WhitelistClassifier{errFoo, errBar} - - if c.Classify(nil) != Succeed { - t.Error("whitelist misclassified nil") - } - - if c.Classify(errFoo) != Retry { - t.Error("whitelist misclassified foo") - } - if c.Classify(errBar) != Retry { - t.Error("whitelist misclassified bar") - } - if c.Classify(errBaz) != Fail { - t.Error("whitelist misclassified baz") - } -} - -func TestBlacklistClassifier(t *testing.T) { - c := BlacklistClassifier{errBar} - - if c.Classify(nil) != Succeed { - t.Error("blacklist misclassified nil") - } - - if c.Classify(errFoo) != Retry { - t.Error("blacklist misclassified foo") - } - if c.Classify(errBar) != Fail { - t.Error("blacklist misclassified bar") - } - if c.Classify(errBaz) != Retry { - t.Error("blacklist misclassified baz") - } -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/retrier.go b/vendor/github.com/eapache/go-resiliency/retrier/retrier.go 
deleted file mode 100644 index ff328742..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/retrier.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package retrier implements the "retriable" resiliency pattern for Go. -package retrier - -import ( - "math/rand" - "time" -) - -// Retrier implements the "retriable" resiliency pattern, abstracting out the process of retrying a failed action -// a certain number of times with an optional back-off between each retry. -type Retrier struct { - backoff []time.Duration - class Classifier - jitter float64 - rand *rand.Rand -} - -// New constructs a Retrier with the given backoff pattern and classifier. The length of the backoff pattern -// indicates how many times an action will be retried, and the value at each index indicates the amount of time -// waited before each subsequent retry. The classifier is used to determine which errors should be retried and -// which should cause the retrier to fail fast. The DefaultClassifier is used if nil is passed. -func New(backoff []time.Duration, class Classifier) *Retrier { - if class == nil { - class = DefaultClassifier{} - } - - return &Retrier{ - backoff: backoff, - class: class, - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - } -} - -// Run executes the given work function, then classifies its return value based on the classifier used -// to construct the Retrier. If the result is Succeed or Fail, the return value of the work function is -// returned to the caller. If the result is Retry, then Run sleeps according to the its backoff policy -// before retrying. If the total number of retries is exceeded then the return value of the work function -// is returned to the caller regardless. 
-func (r *Retrier) Run(work func() error) error { - retries := 0 - for { - ret := work() - - switch r.class.Classify(ret) { - case Succeed, Fail: - return ret - case Retry: - if retries >= len(r.backoff) { - return ret - } - time.Sleep(r.calcSleep(retries)) - retries++ - } - } -} - -func (r *Retrier) calcSleep(i int) time.Duration { - // take a random float in the range (-r.jitter, +r.jitter) and multiply it by the base amount - return r.backoff[i] + time.Duration(((r.rand.Float64()*2)-1)*r.jitter*float64(r.backoff[i])) -} - -// SetJitter sets the amount of jitter on each back-off to a factor between 0.0 and 1.0 (values outside this range -// are silently ignored). When a retry occurs, the back-off is adjusted by a random amount up to this value. -func (r *Retrier) SetJitter(jit float64) { - if jit < 0 || jit > 1 { - return - } - r.jitter = jit -} diff --git a/vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go b/vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go deleted file mode 100644 index 2d061d9b..00000000 --- a/vendor/github.com/eapache/go-resiliency/retrier/retrier_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package retrier - -import ( - "testing" - "time" -) - -var i int - -func genWork(returns []error) func() error { - i = 0 - return func() error { - i++ - if i > len(returns) { - return nil - } - return returns[i-1] - } -} - -func TestRetrier(t *testing.T) { - r := New([]time.Duration{0, 10 * time.Millisecond}, WhitelistClassifier{errFoo}) - - err := r.Run(genWork([]error{errFoo, errFoo})) - if err != nil { - t.Error(err) - } - if i != 3 { - t.Error("run wrong number of times") - } - - err = r.Run(genWork([]error{errFoo, errBar})) - if err != errBar { - t.Error(err) - } - if i != 2 { - t.Error("run wrong number of times") - } - - err = r.Run(genWork([]error{errBar, errBaz})) - if err != errBar { - t.Error(err) - } - if i != 1 { - t.Error("run wrong number of times") - } -} - -func TestRetrierNone(t *testing.T) { - r := New(nil, nil) 
- - i = 0 - err := r.Run(func() error { - i++ - return errFoo - }) - if err != errFoo { - t.Error(err) - } - if i != 1 { - t.Error("run wrong number of times") - } - - i = 0 - err = r.Run(func() error { - i++ - return nil - }) - if err != nil { - t.Error(err) - } - if i != 1 { - t.Error("run wrong number of times") - } -} - -func TestRetrierJitter(t *testing.T) { - r := New([]time.Duration{0, 10 * time.Millisecond, 4 * time.Hour}, nil) - - if r.calcSleep(0) != 0 { - t.Error("Incorrect sleep calculated") - } - if r.calcSleep(1) != 10*time.Millisecond { - t.Error("Incorrect sleep calculated") - } - if r.calcSleep(2) != 4*time.Hour { - t.Error("Incorrect sleep calculated") - } - - r.SetJitter(0.25) - for i := 0; i < 20; i++ { - if r.calcSleep(0) != 0 { - t.Error("Incorrect sleep calculated") - } - - slp := r.calcSleep(1) - if slp < 7500*time.Microsecond || slp > 12500*time.Microsecond { - t.Error("Incorrect sleep calculated") - } - - slp = r.calcSleep(2) - if slp < 3*time.Hour || slp > 5*time.Hour { - t.Error("Incorrect sleep calculated") - } - } - - r.SetJitter(-1) - if r.jitter != 0.25 { - t.Error("Invalid jitter value accepted") - } - - r.SetJitter(2) - if r.jitter != 0.25 { - t.Error("Invalid jitter value accepted") - } -} - -func ExampleRetrier() { - r := New(ConstantBackoff(3, 100*time.Millisecond), nil) - - err := r.Run(func() error { - // do some work - return nil - }) - - if err != nil { - // handle the case where the work failed three times - } -} diff --git a/vendor/github.com/eapache/go-resiliency/semaphore/README.md b/vendor/github.com/eapache/go-resiliency/semaphore/README.md deleted file mode 100644 index a4a73ea0..00000000 --- a/vendor/github.com/eapache/go-resiliency/semaphore/README.md +++ /dev/null @@ -1,22 +0,0 @@ -semaphore -========= - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) 
-[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/semaphore?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/semaphore) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The semaphore resiliency pattern for golang. - -Creating a semaphore takes two parameters: -- ticket count (how many tickets to give out at once) -- timeout (how long to wait for a ticket if none are currently available) - -```go -sem := semaphore.New(3, 1*time.Second) - -if err := sem.Acquire(); err != nil { - // could not acquire semaphore - return err -} -defer sem.Release() -``` diff --git a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go b/vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go deleted file mode 100644 index d4aaa04c..00000000 --- a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore.go +++ /dev/null @@ -1,52 +0,0 @@ -// Package semaphore implements the semaphore resiliency pattern for Go. -package semaphore - -import ( - "errors" - "time" -) - -// ErrNoTickets is the error returned by Acquire when it could not acquire -// a ticket from the semaphore within the configured timeout. -var ErrNoTickets = errors.New("could not acquire semaphore ticket") - -// Semaphore implements the semaphore resiliency pattern -type Semaphore struct { - sem chan struct{} - timeout time.Duration -} - -// New constructs a new Semaphore with the given ticket-count -// and timeout. -func New(tickets int, timeout time.Duration) *Semaphore { - return &Semaphore{ - sem: make(chan struct{}, tickets), - timeout: timeout, - } -} - -// Acquire tries to acquire a ticket from the semaphore. If it can, it returns nil. -// If it cannot after "timeout" amount of time, it returns ErrNoTickets. It is -// safe to call Acquire concurrently on a single Semaphore. 
-func (s *Semaphore) Acquire() error { - select { - case s.sem <- struct{}{}: - return nil - case <-time.After(s.timeout): - return ErrNoTickets - } -} - -// Release releases an acquired ticket back to the semaphore. It is safe to call -// Release concurrently on a single Semaphore. It is an error to call Release on -// a Semaphore from which you have not first acquired a ticket. -func (s *Semaphore) Release() { - <-s.sem -} - -// IsEmpty will return true if no tickets are being held at that instant. -// It is safe to call concurrently with Acquire and Release, though do note -// that the result may then be unpredictable. -func (s *Semaphore) IsEmpty() bool { - return len(s.sem) == 0 -} diff --git a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go b/vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go deleted file mode 100644 index 08da7914..00000000 --- a/vendor/github.com/eapache/go-resiliency/semaphore/semaphore_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package semaphore - -import ( - "testing" - "time" -) - -func TestSemaphoreAcquireRelease(t *testing.T) { - sem := New(3, 1*time.Second) - - for i := 0; i < 10; i++ { - if err := sem.Acquire(); err != nil { - t.Error(err) - } - if err := sem.Acquire(); err != nil { - t.Error(err) - } - if err := sem.Acquire(); err != nil { - t.Error(err) - } - sem.Release() - sem.Release() - sem.Release() - } -} - -func TestSemaphoreBlockTimeout(t *testing.T) { - sem := New(1, 200*time.Millisecond) - - if err := sem.Acquire(); err != nil { - t.Error(err) - } - - start := time.Now() - if err := sem.Acquire(); err != ErrNoTickets { - t.Error(err) - } - if start.Add(200 * time.Millisecond).After(time.Now()) { - t.Error("semaphore did not wait long enough") - } - - sem.Release() - if err := sem.Acquire(); err != nil { - t.Error(err) - } -} - -func TestSemaphoreEmpty(t *testing.T) { - sem := New(2, 200*time.Millisecond) - - if !sem.IsEmpty() { - t.Error("semaphore should be empty") - } - - sem.Acquire() 
- - if sem.IsEmpty() { - t.Error("semaphore should not be empty") - } - - sem.Release() - - if !sem.IsEmpty() { - t.Error("semaphore should be empty") - } -} - -func ExampleSemaphore() { - sem := New(3, 1*time.Second) - - for i := 0; i < 10; i++ { - go func() { - if err := sem.Acquire(); err != nil { - return //could not acquire semaphore - } - defer sem.Release() - - // do something semaphore-guarded - }() - } -} diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/020dfb19a68cbcf99dc93dc1030068d4c9968ad0-2 b/vendor/github.com/eapache/go-xerial-snappy/corpus/020dfb19a68cbcf99dc93dc1030068d4c9968ad0-2 deleted file mode 100644 index 1b704ae0ea46b0fd879bebf2ed4b1638b3016f4f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8 PcmaFAfA9PKdm|YD8@B~8 diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/05979b224be0294bf350310d4ba5257c9bb815db-3 b/vendor/github.com/eapache/go-xerial-snappy/corpus/05979b224be0294bf350310d4ba5257c9bb815db-3 deleted file mode 100644 index 8c321a61..00000000 --- a/vendor/github.com/eapache/go-xerial-snappy/corpus/05979b224be0294bf350310d4ba5257c9bb815db-3 +++ /dev/null @@ -1 +0,0 @@ -�ï¿ï¿½ï¿½Y \ No newline at end of file diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9 b/vendor/github.com/eapache/go-xerial-snappy/corpus/0e64ca2823923c5efa03ff2bd6e0aa1018eeca3b-9 deleted file mode 100644 index d413114b2d7a6b16ef8fcbf79853b24a4747dacb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56 vcmZn)_Hzsfh-6@pV_;-p0Ap@Wg#aH%Pd^vO5TFQHhyf@DWr0*NFe(55)U*Xx diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/1 b/vendor/github.com/eapache/go-xerial-snappy/corpus/1 deleted file mode 100644 index 5e3abaf35b463142c5e53036bf70063aa37f18ca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31 fcmZn)_Hzsfh-3f)MkwayR0!~K^z?Ia3~>YiQUnDe diff --git 
a/vendor/github.com/eapache/go-xerial-snappy/corpus/361a1c6d2a8f80780826c3d83ad391d0475c922f-4 b/vendor/github.com/eapache/go-xerial-snappy/corpus/361a1c6d2a8f80780826c3d83ad391d0475c922f-4 deleted file mode 100644 index d2528bad484a4fe7372fc3858461660fef90fba3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 50 jcmZn)_Hzsfh-3f)MkwayR0!~K^z?Ia41o!Q<)8`xu9*bo diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/4117af68228fa64339d362cf980c68ffadff96c8-12 b/vendor/github.com/eapache/go-xerial-snappy/corpus/4117af68228fa64339d362cf980c68ffadff96c8-12 deleted file mode 100644 index 38ee90f859b809677a61199f7c11b5f8af80b33c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 248 zcmZn)_Hzsfh-6@pV_;-p0Ap@W1sBHW~Em diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/581b8fe7088f921567811fdf30e1f527c9f48e5e b/vendor/github.com/eapache/go-xerial-snappy/corpus/581b8fe7088f921567811fdf30e1f527c9f48e5e deleted file mode 100644 index 59c77e4d..00000000 --- a/vendor/github.com/eapache/go-xerial-snappy/corpus/581b8fe7088f921567811fdf30e1f527c9f48e5e +++ /dev/null @@ -1 +0,0 @@ -package \ No newline at end of file diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/60cd10738158020f5843b43960158c3d116b3a71-11 b/vendor/github.com/eapache/go-xerial-snappy/corpus/60cd10738158020f5843b43960158c3d116b3a71-11 deleted file mode 100644 index 801e2cf29ca389c2b507273e2b5bb7a330433c8e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 bcmZn)_Hzsfh-3f)KNrUk5Y0HG2u20~rP%|i diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/652b031b4b9d601235f86ef62523e63d733b8623-3 b/vendor/github.com/eapache/go-xerial-snappy/corpus/652b031b4b9d601235f86ef62523e63d733b8623-3 deleted file mode 100644 index 76bd9d9ebc765a0a6c28aece7027fa0e2b77b5c8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45 
kcmZn)_Hzsfh-3f)MkwayR0!~K^z?Ia41o!Qa>0sv{022=n5 diff --git a/vendor/github.com/eapache/go-xerial-snappy/corpus/efa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11 b/vendor/github.com/eapache/go-xerial-snappy/corpus/efa11d676fb2a77afb8eac3d7ed30e330a7c2efe-11 deleted file mode 100644 index 2031634884f95d331e0cd7b237ae3e0131f43f6e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 116 zcmZn)_Hzsfh-6@pV_;-p0Ap@W1sBH&1)" != "" ]]; then - git status >&2; - git diff -a >&2; - exit 1; - fi; - echo "git status is clean."; - fi; - - make test diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile deleted file mode 100644 index 7a51c95b..00000000 --- a/vendor/github.com/golang/protobuf/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -all: install - -install: - go install ./proto ./jsonpb ./ptypes ./protoc-gen-go - -test: - go test ./... ./protoc-gen-go/testdata - go test -tags purego ./... ./protoc-gen-go/testdata - go build ./protoc-gen-go/testdata/grpc/grpc.pb.go - make -C conformance test - -clean: - go clean ./... - -nuke: - go clean -i ./... - -regenerate: - ./regenerate.sh diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md deleted file mode 100644 index 61820bed..00000000 --- a/vendor/github.com/golang/protobuf/README.md +++ /dev/null @@ -1,281 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format - -[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) -[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf) - -Google's data interchange format. -Copyright 2010 The Go Authors. -https://github.com/golang/protobuf - -This package and the code it generates requires at least Go 1.6. - -This software implements Go bindings for protocol buffers. 
For -information about protocol buffers themselves, see - https://developers.google.com/protocol-buffers/ - -## Installation ## - -To use this software, you must: -- Install the standard C++ implementation of protocol buffers from - https://developers.google.com/protocol-buffers/ -- Of course, install the Go compiler and tools from - https://golang.org/ - See - https://golang.org/doc/install - for details or, if you are using gccgo, follow the instructions at - https://golang.org/doc/install/gccgo -- Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. - The compiler plugin, protoc-gen-go, will be installed in $GOBIN, - defaulting to $GOPATH/bin. It must be in your $PATH for the protocol - compiler, protoc, to find it. - -This software has two parts: a 'protocol compiler plugin' that -generates Go source files that, once compiled, can access and manage -protocol buffers; and a library that implements run-time support for -encoding (marshaling), decoding (unmarshaling), and accessing protocol -buffers. - -There is support for gRPC in Go using protocol buffers. -See the note at the bottom of this file for details. - -There are no insertion points in the plugin. - - -## Using protocol buffers with Go ## - -Once the software is installed, there are two steps to using it. -First you must compile the protocol buffer definitions and then import -them, with the support library, into your program. - -To compile the protocol buffer definition, run protoc with the --go_out -parameter set to the directory you want to output the Go code to. - - protoc --go_out=. *.proto - -The generated files will be suffixed .pb.go. See the Test code below -for an example using such a file. - -## Packages and input paths ## - -The protocol buffer language has a concept of "packages" which does not -correspond well to the Go notion of packages. 
In generated Go code, -each source `.proto` file is associated with a single Go package. The -name and import path for this package is specified with the `go_package` -proto option: - - option go_package = "github.com/golang/protobuf/ptypes/any"; - -The protocol buffer compiler will attempt to derive a package name and -import path if a `go_package` option is not present, but it is -best to always specify one explicitly. - -There is a one-to-one relationship between source `.proto` files and -generated `.pb.go` files, but any number of `.pb.go` files may be -contained in the same Go package. - -The output name of a generated file is produced by replacing the -`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`). -However, the output directory is selected in one of two ways. Let -us say we have `inputs/x.proto` with a `go_package` option of -`github.com/golang/protobuf/p`. The corresponding output file may -be: - -- Relative to the import path: - -```shell - protoc --go_out=. inputs/x.proto - # writes ./github.com/golang/protobuf/p/x.pb.go -``` - - (This can work well with `--go_out=$GOPATH`.) - -- Relative to the input file: - -```shell -protoc --go_out=paths=source_relative:. inputs/x.proto -# generate ./inputs/x.pb.go -``` - -## Generated code ## - -The package comment for the proto library contains text describing -the interface provided in Go for protocol buffers. Here is an edited -version. - -The proto package converts data structures to and from the -wire format of protocol buffers. It works in concert with the -Go source code generated for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. 
- The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - Helpers for getting values are superseded by the - GetFoo methods and their use is deprecated. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed with the enum's type name. Enum types have - a String method, and a Enum method to assist in message construction. - - Nested groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. 
- -Consider file test.proto, containing - -```proto - syntax = "proto2"; - package example; - - enum FOO { X = 17; }; - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - } -``` - -To create and play with a Test object from the example package, - -```go - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - "path/to/example" - ) - - func main() { - test := &example.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &example.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. - } -``` - -## Parameters ## - -To pass extra parameters to the plugin, use a comma-separated -parameter list separated from the output directory by a colon: - - protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto - -- `paths=(import | source_relative)` - specifies how the paths of - generated files are structured. See the "Packages and imports paths" - section above. The default is `import`. -- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to - load. The only plugin in this repo is `grpc`. -- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is - associated with Go package quux/shme. This is subject to the - import_prefix parameter. - -The following parameters are deprecated and should not be used: - -- `import_prefix=xxx` - a prefix that is added onto the beginning of - all imports. -- `import_path=foo/bar` - used as the package if no input files - declare `go_package`. If it contains slashes, everything up to the - rightmost slash is ignored. 
- -## gRPC Support ## - -If a proto file specifies RPC services, protoc-gen-go can be instructed to -generate code compatible with gRPC (http://www.grpc.io/). To do this, pass -the `plugins` parameter to protoc-gen-go; the usual way is to insert it into -the --go_out argument to protoc: - - protoc --go_out=plugins=grpc:. *.proto - -## Compatibility ## - -The library and the generated code are expected to be stable over time. -However, we reserve the right to make breaking changes without notice for the -following reasons: - -- Security. A security issue in the specification or implementation may come to - light whose resolution requires breaking compatibility. We reserve the right - to address such security issues. -- Unspecified behavior. There are some aspects of the Protocol Buffers - specification that are undefined. Programs that depend on such unspecified - behavior may break in future releases. -- Specification errors or changes. If it becomes necessary to address an - inconsistency, incompleteness, or change in the Protocol Buffers - specification, resolving the issue could affect the meaning or legality of - existing programs. We reserve the right to address such issues, including - updating the implementations. -- Bugs. If the library has a bug that violates the specification, a program - that depends on the buggy behavior may break if the bug is fixed. We reserve - the right to fix such bugs. -- Adding methods or fields to generated structs. These may conflict with field - names that already exist in a schema, causing applications to break. When the - code generator encounters a field in the schema that would collide with a - generated field or method name, the code generator will append an underscore - to the generated field or method name. -- Adding, removing, or changing methods or fields in generated structs that - start with `XXX`. These parts of the generated code are exported out of - necessity, but should not be considered part of the public API. 
-- Adding, removing, or changing unexported symbols in generated code. - -Any breaking changes outside of these will be announced 6 months in advance to -protobuf@googlegroups.com. - -You should, whenever possible, use generated code created by the `protoc-gen-go` -tool built at the same commit as the `proto` package. The `proto` package -declares package-level constants in the form `ProtoPackageIsVersionX`. -Application code and generated code may depend on one of these constants to -ensure that compilation will fail if the available version of the proto library -is too old. Whenever we make a change to the generated code that requires newer -library support, in the same commit we will increment the version number of the -generated code and declare a new package-level constant whose name incorporates -the latest version number. Removing a compatibility constant is considered a -breaking change and would be subject to the announcement policy stated above. - -The `protoc-gen-go/generator` package exposes a plugin interface, -which is used by the gRPC code generation. This interface is not -supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/golang/protobuf/conformance/Makefile b/vendor/github.com/golang/protobuf/conformance/Makefile deleted file mode 100644 index b99e4ed6..00000000 --- a/vendor/github.com/golang/protobuf/conformance/Makefile +++ /dev/null @@ -1,49 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2016 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -PROTOBUF_ROOT=$(HOME)/src/protobuf - -all: - @echo To run the tests in this directory, acquire the main protobuf - @echo distribution from: - @echo - @echo ' https://github.com/google/protobuf' - @echo - @echo Build the test runner with: - @echo - @echo ' cd conformance && make conformance-test-runner' - @echo - @echo And run the tests in this directory with: - @echo - @echo ' make test PROTOBUF_ROOT=' - -test: - ./test.sh $(PROTOBUF_ROOT) diff --git a/vendor/github.com/golang/protobuf/conformance/conformance.go b/vendor/github.com/golang/protobuf/conformance/conformance.go deleted file mode 100644 index 3029312a..00000000 --- a/vendor/github.com/golang/protobuf/conformance/conformance.go +++ /dev/null @@ -1,154 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// conformance implements the conformance test subprocess protocol as -// documented in conformance.proto. -package main - -import ( - "encoding/binary" - "fmt" - "io" - "os" - - pb "github.com/golang/protobuf/conformance/internal/conformance_proto" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" -) - -func main() { - var sizeBuf [4]byte - inbuf := make([]byte, 0, 4096) - outbuf := proto.NewBuffer(nil) - for { - if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF { - break - } else if err != nil { - fmt.Fprintln(os.Stderr, "go conformance: read request:", err) - os.Exit(1) - } - size := binary.LittleEndian.Uint32(sizeBuf[:]) - if int(size) > cap(inbuf) { - inbuf = make([]byte, size) - } - inbuf = inbuf[:size] - if _, err := io.ReadFull(os.Stdin, inbuf); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: read request:", err) - os.Exit(1) - } - - req := new(pb.ConformanceRequest) - if err := proto.Unmarshal(inbuf, req); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: parse request:", err) - os.Exit(1) - } - res := handle(req) - - if err := outbuf.Marshal(res); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err) - os.Exit(1) - } - binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes()))) - if _, err := os.Stdout.Write(sizeBuf[:]); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: write response:", err) - os.Exit(1) - } - if _, err := 
os.Stdout.Write(outbuf.Bytes()); err != nil { - fmt.Fprintln(os.Stderr, "go conformance: write response:", err) - os.Exit(1) - } - outbuf.Reset() - } -} - -var jsonMarshaler = jsonpb.Marshaler{ - OrigName: true, -} - -func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse { - var err error - var msg pb.TestAllTypes - switch p := req.Payload.(type) { - case *pb.ConformanceRequest_ProtobufPayload: - err = proto.Unmarshal(p.ProtobufPayload, &msg) - case *pb.ConformanceRequest_JsonPayload: - err = jsonpb.UnmarshalString(p.JsonPayload, &msg) - default: - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_RuntimeError{ - RuntimeError: "unknown request payload type", - }, - } - } - if err != nil { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_ParseError{ - ParseError: err.Error(), - }, - } - } - switch req.RequestedOutputFormat { - case pb.WireFormat_PROTOBUF: - p, err := proto.Marshal(&msg) - if err != nil { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_SerializeError{ - SerializeError: err.Error(), - }, - } - } - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_ProtobufPayload{ - ProtobufPayload: p, - }, - } - case pb.WireFormat_JSON: - p, err := jsonMarshaler.MarshalToString(&msg) - if err != nil { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_SerializeError{ - SerializeError: err.Error(), - }, - } - } - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_JsonPayload{ - JsonPayload: p, - }, - } - default: - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_RuntimeError{ - RuntimeError: "unknown output format", - }, - } - } -} diff --git a/vendor/github.com/golang/protobuf/conformance/conformance.sh b/vendor/github.com/golang/protobuf/conformance/conformance.sh deleted file mode 100755 index 8532f571..00000000 --- a/vendor/github.com/golang/protobuf/conformance/conformance.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd $(dirname $0) 
-exec go run conformance.go $* diff --git a/vendor/github.com/golang/protobuf/conformance/failure_list_go.txt b/vendor/github.com/golang/protobuf/conformance/failure_list_go.txt deleted file mode 100644 index d3728089..00000000 --- a/vendor/github.com/golang/protobuf/conformance/failure_list_go.txt +++ /dev/null @@ -1,61 +0,0 @@ -# This is the list of conformance tests that are known ot fail right now. -# TODO: These should be fixed. - -DurationProtoInputTooLarge.JsonOutput -DurationProtoInputTooSmall.JsonOutput -FieldMaskNumbersDontRoundTrip.JsonOutput -FieldMaskPathsDontRoundTrip.JsonOutput -FieldMaskTooManyUnderscore.JsonOutput -JsonInput.AnyWithFieldMask.JsonOutput -JsonInput.AnyWithFieldMask.ProtobufOutput -JsonInput.DoubleFieldQuotedValue.JsonOutput -JsonInput.DoubleFieldQuotedValue.ProtobufOutput -JsonInput.DurationHas3FractionalDigits.Validator -JsonInput.DurationHas6FractionalDigits.Validator -JsonInput.DurationHas9FractionalDigits.Validator -JsonInput.DurationHasZeroFractionalDigit.Validator -JsonInput.DurationMaxValue.JsonOutput -JsonInput.DurationMaxValue.ProtobufOutput -JsonInput.DurationMinValue.JsonOutput -JsonInput.DurationMinValue.ProtobufOutput -JsonInput.EnumFieldUnknownValue.Validator -JsonInput.FieldMask.JsonOutput -JsonInput.FieldMask.ProtobufOutput -JsonInput.FieldNameInLowerCamelCase.Validator -JsonInput.FieldNameWithMixedCases.JsonOutput -JsonInput.FieldNameWithMixedCases.ProtobufOutput -JsonInput.FieldNameWithMixedCases.Validator -JsonInput.FieldNameWithNumbers.Validator -JsonInput.FloatFieldQuotedValue.JsonOutput -JsonInput.FloatFieldQuotedValue.ProtobufOutput -JsonInput.Int32FieldExponentialFormat.JsonOutput -JsonInput.Int32FieldExponentialFormat.ProtobufOutput -JsonInput.Int32FieldFloatTrailingZero.JsonOutput -JsonInput.Int32FieldFloatTrailingZero.ProtobufOutput -JsonInput.Int32FieldMaxFloatValue.JsonOutput -JsonInput.Int32FieldMaxFloatValue.ProtobufOutput -JsonInput.Int32FieldMinFloatValue.JsonOutput 
-JsonInput.Int32FieldMinFloatValue.ProtobufOutput -JsonInput.Int32FieldStringValue.JsonOutput -JsonInput.Int32FieldStringValue.ProtobufOutput -JsonInput.Int32FieldStringValueEscaped.JsonOutput -JsonInput.Int32FieldStringValueEscaped.ProtobufOutput -JsonInput.Int64FieldBeString.Validator -JsonInput.MapFieldValueIsNull -JsonInput.OneofFieldDuplicate -JsonInput.RepeatedFieldMessageElementIsNull -JsonInput.RepeatedFieldPrimitiveElementIsNull -JsonInput.StringFieldSurrogateInWrongOrder -JsonInput.StringFieldUnpairedHighSurrogate -JsonInput.StringFieldUnpairedLowSurrogate -JsonInput.TimestampHas3FractionalDigits.Validator -JsonInput.TimestampHas6FractionalDigits.Validator -JsonInput.TimestampHas9FractionalDigits.Validator -JsonInput.TimestampHasZeroFractionalDigit.Validator -JsonInput.TimestampJsonInputTooSmall -JsonInput.TimestampZeroNormalized.Validator -JsonInput.Uint32FieldMaxFloatValue.JsonOutput -JsonInput.Uint32FieldMaxFloatValue.ProtobufOutput -JsonInput.Uint64FieldBeString.Validator -TimestampProtoInputTooLarge.JsonOutput -TimestampProtoInputTooSmall.JsonOutput diff --git a/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go b/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go deleted file mode 100644 index 15102e85..00000000 --- a/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go +++ /dev/null @@ -1,1834 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: conformance.proto - -package conformance - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" -import duration "github.com/golang/protobuf/ptypes/duration" -import _struct "github.com/golang/protobuf/ptypes/struct" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" -import wrappers "github.com/golang/protobuf/ptypes/wrappers" -import field_mask "google.golang.org/genproto/protobuf/field_mask" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type WireFormat int32 - -const ( - WireFormat_UNSPECIFIED WireFormat = 0 - WireFormat_PROTOBUF WireFormat = 1 - WireFormat_JSON WireFormat = 2 -) - -var WireFormat_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "PROTOBUF", - 2: "JSON", -} -var WireFormat_value = map[string]int32{ - "UNSPECIFIED": 0, - "PROTOBUF": 1, - "JSON": 2, -} - -func (x WireFormat) String() string { - return proto.EnumName(WireFormat_name, int32(x)) -} -func (WireFormat) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{0} -} - -type ForeignEnum int32 - -const ( - ForeignEnum_FOREIGN_FOO ForeignEnum = 0 - ForeignEnum_FOREIGN_BAR ForeignEnum = 1 - ForeignEnum_FOREIGN_BAZ ForeignEnum = 2 -) - -var ForeignEnum_name = map[int32]string{ - 0: "FOREIGN_FOO", - 1: "FOREIGN_BAR", - 2: "FOREIGN_BAZ", -} -var ForeignEnum_value = map[string]int32{ - "FOREIGN_FOO": 0, - "FOREIGN_BAR": 1, - "FOREIGN_BAZ": 2, -} - -func (x ForeignEnum) String() string { - return proto.EnumName(ForeignEnum_name, int32(x)) 
-} -func (ForeignEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{1} -} - -type TestAllTypes_NestedEnum int32 - -const ( - TestAllTypes_FOO TestAllTypes_NestedEnum = 0 - TestAllTypes_BAR TestAllTypes_NestedEnum = 1 - TestAllTypes_BAZ TestAllTypes_NestedEnum = 2 - TestAllTypes_NEG TestAllTypes_NestedEnum = -1 -) - -var TestAllTypes_NestedEnum_name = map[int32]string{ - 0: "FOO", - 1: "BAR", - 2: "BAZ", - -1: "NEG", -} -var TestAllTypes_NestedEnum_value = map[string]int32{ - "FOO": 0, - "BAR": 1, - "BAZ": 2, - "NEG": -1, -} - -func (x TestAllTypes_NestedEnum) String() string { - return proto.EnumName(TestAllTypes_NestedEnum_name, int32(x)) -} -func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{2, 0} -} - -// Represents a single test case's input. The testee should: -// -// 1. parse this proto (which should always succeed) -// 2. parse the protobuf or JSON payload in "payload" (which may fail) -// 3. if the parse succeeded, serialize the message in the requested format. -type ConformanceRequest struct { - // The payload (whether protobuf of JSON) is always for a TestAllTypes proto - // (see below). - // - // Types that are valid to be assigned to Payload: - // *ConformanceRequest_ProtobufPayload - // *ConformanceRequest_JsonPayload - Payload isConformanceRequest_Payload `protobuf_oneof:"payload"` - // Which format should the testee serialize its message to? 
- RequestedOutputFormat WireFormat `protobuf:"varint,3,opt,name=requested_output_format,json=requestedOutputFormat,proto3,enum=conformance.WireFormat" json:"requested_output_format,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} } -func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) } -func (*ConformanceRequest) ProtoMessage() {} -func (*ConformanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{0} -} -func (m *ConformanceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConformanceRequest.Unmarshal(m, b) -} -func (m *ConformanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConformanceRequest.Marshal(b, m, deterministic) -} -func (dst *ConformanceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConformanceRequest.Merge(dst, src) -} -func (m *ConformanceRequest) XXX_Size() int { - return xxx_messageInfo_ConformanceRequest.Size(m) -} -func (m *ConformanceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConformanceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConformanceRequest proto.InternalMessageInfo - -type isConformanceRequest_Payload interface { - isConformanceRequest_Payload() -} - -type ConformanceRequest_ProtobufPayload struct { - ProtobufPayload []byte `protobuf:"bytes,1,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` -} - -type ConformanceRequest_JsonPayload struct { - JsonPayload string `protobuf:"bytes,2,opt,name=json_payload,json=jsonPayload,proto3,oneof"` -} - -func (*ConformanceRequest_ProtobufPayload) isConformanceRequest_Payload() {} - -func (*ConformanceRequest_JsonPayload) isConformanceRequest_Payload() {} - -func (m *ConformanceRequest) GetPayload() isConformanceRequest_Payload { - if m != nil { - return m.Payload - } - return nil 
-} - -func (m *ConformanceRequest) GetProtobufPayload() []byte { - if x, ok := m.GetPayload().(*ConformanceRequest_ProtobufPayload); ok { - return x.ProtobufPayload - } - return nil -} - -func (m *ConformanceRequest) GetJsonPayload() string { - if x, ok := m.GetPayload().(*ConformanceRequest_JsonPayload); ok { - return x.JsonPayload - } - return "" -} - -func (m *ConformanceRequest) GetRequestedOutputFormat() WireFormat { - if m != nil { - return m.RequestedOutputFormat - } - return WireFormat_UNSPECIFIED -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ConformanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ConformanceRequest_OneofMarshaler, _ConformanceRequest_OneofUnmarshaler, _ConformanceRequest_OneofSizer, []interface{}{ - (*ConformanceRequest_ProtobufPayload)(nil), - (*ConformanceRequest_JsonPayload)(nil), - } -} - -func _ConformanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ConformanceRequest) - // payload - switch x := m.Payload.(type) { - case *ConformanceRequest_ProtobufPayload: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeRawBytes(x.ProtobufPayload) - case *ConformanceRequest_JsonPayload: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.JsonPayload) - case nil: - default: - return fmt.Errorf("ConformanceRequest.Payload has unexpected type %T", x) - } - return nil -} - -func _ConformanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ConformanceRequest) - switch tag { - case 1: // payload.protobuf_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Payload = &ConformanceRequest_ProtobufPayload{x} - return true, err - case 2: // payload.json_payload - if wire != proto.WireBytes { - 
return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Payload = &ConformanceRequest_JsonPayload{x} - return true, err - default: - return false, nil - } -} - -func _ConformanceRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ConformanceRequest) - // payload - switch x := m.Payload.(type) { - case *ConformanceRequest_ProtobufPayload: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) - n += len(x.ProtobufPayload) - case *ConformanceRequest_JsonPayload: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.JsonPayload))) - n += len(x.JsonPayload) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Represents a single test case's output. -type ConformanceResponse struct { - // Types that are valid to be assigned to Result: - // *ConformanceResponse_ParseError - // *ConformanceResponse_SerializeError - // *ConformanceResponse_RuntimeError - // *ConformanceResponse_ProtobufPayload - // *ConformanceResponse_JsonPayload - // *ConformanceResponse_Skipped - Result isConformanceResponse_Result `protobuf_oneof:"result"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} } -func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) } -func (*ConformanceResponse) ProtoMessage() {} -func (*ConformanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{1} -} -func (m *ConformanceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConformanceResponse.Unmarshal(m, b) -} -func (m *ConformanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConformanceResponse.Marshal(b, m, deterministic) -} -func (dst *ConformanceResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_ConformanceResponse.Merge(dst, src) -} -func (m *ConformanceResponse) XXX_Size() int { - return xxx_messageInfo_ConformanceResponse.Size(m) -} -func (m *ConformanceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConformanceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConformanceResponse proto.InternalMessageInfo - -type isConformanceResponse_Result interface { - isConformanceResponse_Result() -} - -type ConformanceResponse_ParseError struct { - ParseError string `protobuf:"bytes,1,opt,name=parse_error,json=parseError,proto3,oneof"` -} - -type ConformanceResponse_SerializeError struct { - SerializeError string `protobuf:"bytes,6,opt,name=serialize_error,json=serializeError,proto3,oneof"` -} - -type ConformanceResponse_RuntimeError struct { - RuntimeError string `protobuf:"bytes,2,opt,name=runtime_error,json=runtimeError,proto3,oneof"` -} - -type ConformanceResponse_ProtobufPayload struct { - ProtobufPayload []byte `protobuf:"bytes,3,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` -} - -type ConformanceResponse_JsonPayload struct { - JsonPayload string `protobuf:"bytes,4,opt,name=json_payload,json=jsonPayload,proto3,oneof"` -} - -type ConformanceResponse_Skipped struct { - Skipped string `protobuf:"bytes,5,opt,name=skipped,proto3,oneof"` -} - -func (*ConformanceResponse_ParseError) isConformanceResponse_Result() {} - -func (*ConformanceResponse_SerializeError) isConformanceResponse_Result() {} - -func (*ConformanceResponse_RuntimeError) isConformanceResponse_Result() {} - -func (*ConformanceResponse_ProtobufPayload) isConformanceResponse_Result() {} - -func (*ConformanceResponse_JsonPayload) isConformanceResponse_Result() {} - -func (*ConformanceResponse_Skipped) isConformanceResponse_Result() {} - -func (m *ConformanceResponse) GetResult() isConformanceResponse_Result { - if m != nil { - return m.Result - } - return nil -} - -func (m *ConformanceResponse) GetParseError() string { - if x, ok := 
m.GetResult().(*ConformanceResponse_ParseError); ok { - return x.ParseError - } - return "" -} - -func (m *ConformanceResponse) GetSerializeError() string { - if x, ok := m.GetResult().(*ConformanceResponse_SerializeError); ok { - return x.SerializeError - } - return "" -} - -func (m *ConformanceResponse) GetRuntimeError() string { - if x, ok := m.GetResult().(*ConformanceResponse_RuntimeError); ok { - return x.RuntimeError - } - return "" -} - -func (m *ConformanceResponse) GetProtobufPayload() []byte { - if x, ok := m.GetResult().(*ConformanceResponse_ProtobufPayload); ok { - return x.ProtobufPayload - } - return nil -} - -func (m *ConformanceResponse) GetJsonPayload() string { - if x, ok := m.GetResult().(*ConformanceResponse_JsonPayload); ok { - return x.JsonPayload - } - return "" -} - -func (m *ConformanceResponse) GetSkipped() string { - if x, ok := m.GetResult().(*ConformanceResponse_Skipped); ok { - return x.Skipped - } - return "" -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ConformanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ConformanceResponse_OneofMarshaler, _ConformanceResponse_OneofUnmarshaler, _ConformanceResponse_OneofSizer, []interface{}{ - (*ConformanceResponse_ParseError)(nil), - (*ConformanceResponse_SerializeError)(nil), - (*ConformanceResponse_RuntimeError)(nil), - (*ConformanceResponse_ProtobufPayload)(nil), - (*ConformanceResponse_JsonPayload)(nil), - (*ConformanceResponse_Skipped)(nil), - } -} - -func _ConformanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ConformanceResponse) - // result - switch x := m.Result.(type) { - case *ConformanceResponse_ParseError: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeStringBytes(x.ParseError) - case *ConformanceResponse_SerializeError: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.SerializeError) - case *ConformanceResponse_RuntimeError: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.RuntimeError) - case *ConformanceResponse_ProtobufPayload: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeRawBytes(x.ProtobufPayload) - case *ConformanceResponse_JsonPayload: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.JsonPayload) - case *ConformanceResponse_Skipped: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Skipped) - case nil: - default: - return fmt.Errorf("ConformanceResponse.Result has unexpected type %T", x) - } - return nil -} - -func _ConformanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ConformanceResponse) - switch tag { - case 1: // result.parse_error - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_ParseError{x} - return true, err - case 
6: // result.serialize_error - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_SerializeError{x} - return true, err - case 2: // result.runtime_error - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_RuntimeError{x} - return true, err - case 3: // result.protobuf_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Result = &ConformanceResponse_ProtobufPayload{x} - return true, err - case 4: // result.json_payload - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_JsonPayload{x} - return true, err - case 5: // result.skipped - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Result = &ConformanceResponse_Skipped{x} - return true, err - default: - return false, nil - } -} - -func _ConformanceResponse_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ConformanceResponse) - // result - switch x := m.Result.(type) { - case *ConformanceResponse_ParseError: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.ParseError))) - n += len(x.ParseError) - case *ConformanceResponse_SerializeError: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.SerializeError))) - n += len(x.SerializeError) - case *ConformanceResponse_RuntimeError: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.RuntimeError))) - n += len(x.RuntimeError) - case *ConformanceResponse_ProtobufPayload: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) - n += len(x.ProtobufPayload) - case *ConformanceResponse_JsonPayload: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.JsonPayload))) - n += len(x.JsonPayload) - 
case *ConformanceResponse_Skipped: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Skipped))) - n += len(x.Skipped) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// This proto includes every type of field in both singular and repeated -// forms. -type TestAllTypes struct { - // Singular - OptionalInt32 int32 `protobuf:"varint,1,opt,name=optional_int32,json=optionalInt32,proto3" json:"optional_int32,omitempty"` - OptionalInt64 int64 `protobuf:"varint,2,opt,name=optional_int64,json=optionalInt64,proto3" json:"optional_int64,omitempty"` - OptionalUint32 uint32 `protobuf:"varint,3,opt,name=optional_uint32,json=optionalUint32,proto3" json:"optional_uint32,omitempty"` - OptionalUint64 uint64 `protobuf:"varint,4,opt,name=optional_uint64,json=optionalUint64,proto3" json:"optional_uint64,omitempty"` - OptionalSint32 int32 `protobuf:"zigzag32,5,opt,name=optional_sint32,json=optionalSint32,proto3" json:"optional_sint32,omitempty"` - OptionalSint64 int64 `protobuf:"zigzag64,6,opt,name=optional_sint64,json=optionalSint64,proto3" json:"optional_sint64,omitempty"` - OptionalFixed32 uint32 `protobuf:"fixed32,7,opt,name=optional_fixed32,json=optionalFixed32,proto3" json:"optional_fixed32,omitempty"` - OptionalFixed64 uint64 `protobuf:"fixed64,8,opt,name=optional_fixed64,json=optionalFixed64,proto3" json:"optional_fixed64,omitempty"` - OptionalSfixed32 int32 `protobuf:"fixed32,9,opt,name=optional_sfixed32,json=optionalSfixed32,proto3" json:"optional_sfixed32,omitempty"` - OptionalSfixed64 int64 `protobuf:"fixed64,10,opt,name=optional_sfixed64,json=optionalSfixed64,proto3" json:"optional_sfixed64,omitempty"` - OptionalFloat float32 `protobuf:"fixed32,11,opt,name=optional_float,json=optionalFloat,proto3" json:"optional_float,omitempty"` - OptionalDouble float64 `protobuf:"fixed64,12,opt,name=optional_double,json=optionalDouble,proto3" json:"optional_double,omitempty"` - OptionalBool bool 
`protobuf:"varint,13,opt,name=optional_bool,json=optionalBool,proto3" json:"optional_bool,omitempty"` - OptionalString string `protobuf:"bytes,14,opt,name=optional_string,json=optionalString,proto3" json:"optional_string,omitempty"` - OptionalBytes []byte `protobuf:"bytes,15,opt,name=optional_bytes,json=optionalBytes,proto3" json:"optional_bytes,omitempty"` - OptionalNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,18,opt,name=optional_nested_message,json=optionalNestedMessage,proto3" json:"optional_nested_message,omitempty"` - OptionalForeignMessage *ForeignMessage `protobuf:"bytes,19,opt,name=optional_foreign_message,json=optionalForeignMessage,proto3" json:"optional_foreign_message,omitempty"` - OptionalNestedEnum TestAllTypes_NestedEnum `protobuf:"varint,21,opt,name=optional_nested_enum,json=optionalNestedEnum,proto3,enum=conformance.TestAllTypes_NestedEnum" json:"optional_nested_enum,omitempty"` - OptionalForeignEnum ForeignEnum `protobuf:"varint,22,opt,name=optional_foreign_enum,json=optionalForeignEnum,proto3,enum=conformance.ForeignEnum" json:"optional_foreign_enum,omitempty"` - OptionalStringPiece string `protobuf:"bytes,24,opt,name=optional_string_piece,json=optionalStringPiece,proto3" json:"optional_string_piece,omitempty"` - OptionalCord string `protobuf:"bytes,25,opt,name=optional_cord,json=optionalCord,proto3" json:"optional_cord,omitempty"` - RecursiveMessage *TestAllTypes `protobuf:"bytes,27,opt,name=recursive_message,json=recursiveMessage,proto3" json:"recursive_message,omitempty"` - // Repeated - RepeatedInt32 []int32 `protobuf:"varint,31,rep,packed,name=repeated_int32,json=repeatedInt32,proto3" json:"repeated_int32,omitempty"` - RepeatedInt64 []int64 `protobuf:"varint,32,rep,packed,name=repeated_int64,json=repeatedInt64,proto3" json:"repeated_int64,omitempty"` - RepeatedUint32 []uint32 `protobuf:"varint,33,rep,packed,name=repeated_uint32,json=repeatedUint32,proto3" json:"repeated_uint32,omitempty"` - RepeatedUint64 []uint64 
`protobuf:"varint,34,rep,packed,name=repeated_uint64,json=repeatedUint64,proto3" json:"repeated_uint64,omitempty"` - RepeatedSint32 []int32 `protobuf:"zigzag32,35,rep,packed,name=repeated_sint32,json=repeatedSint32,proto3" json:"repeated_sint32,omitempty"` - RepeatedSint64 []int64 `protobuf:"zigzag64,36,rep,packed,name=repeated_sint64,json=repeatedSint64,proto3" json:"repeated_sint64,omitempty"` - RepeatedFixed32 []uint32 `protobuf:"fixed32,37,rep,packed,name=repeated_fixed32,json=repeatedFixed32,proto3" json:"repeated_fixed32,omitempty"` - RepeatedFixed64 []uint64 `protobuf:"fixed64,38,rep,packed,name=repeated_fixed64,json=repeatedFixed64,proto3" json:"repeated_fixed64,omitempty"` - RepeatedSfixed32 []int32 `protobuf:"fixed32,39,rep,packed,name=repeated_sfixed32,json=repeatedSfixed32,proto3" json:"repeated_sfixed32,omitempty"` - RepeatedSfixed64 []int64 `protobuf:"fixed64,40,rep,packed,name=repeated_sfixed64,json=repeatedSfixed64,proto3" json:"repeated_sfixed64,omitempty"` - RepeatedFloat []float32 `protobuf:"fixed32,41,rep,packed,name=repeated_float,json=repeatedFloat,proto3" json:"repeated_float,omitempty"` - RepeatedDouble []float64 `protobuf:"fixed64,42,rep,packed,name=repeated_double,json=repeatedDouble,proto3" json:"repeated_double,omitempty"` - RepeatedBool []bool `protobuf:"varint,43,rep,packed,name=repeated_bool,json=repeatedBool,proto3" json:"repeated_bool,omitempty"` - RepeatedString []string `protobuf:"bytes,44,rep,name=repeated_string,json=repeatedString,proto3" json:"repeated_string,omitempty"` - RepeatedBytes [][]byte `protobuf:"bytes,45,rep,name=repeated_bytes,json=repeatedBytes,proto3" json:"repeated_bytes,omitempty"` - RepeatedNestedMessage []*TestAllTypes_NestedMessage `protobuf:"bytes,48,rep,name=repeated_nested_message,json=repeatedNestedMessage,proto3" json:"repeated_nested_message,omitempty"` - RepeatedForeignMessage []*ForeignMessage `protobuf:"bytes,49,rep,name=repeated_foreign_message,json=repeatedForeignMessage,proto3" 
json:"repeated_foreign_message,omitempty"` - RepeatedNestedEnum []TestAllTypes_NestedEnum `protobuf:"varint,51,rep,packed,name=repeated_nested_enum,json=repeatedNestedEnum,proto3,enum=conformance.TestAllTypes_NestedEnum" json:"repeated_nested_enum,omitempty"` - RepeatedForeignEnum []ForeignEnum `protobuf:"varint,52,rep,packed,name=repeated_foreign_enum,json=repeatedForeignEnum,proto3,enum=conformance.ForeignEnum" json:"repeated_foreign_enum,omitempty"` - RepeatedStringPiece []string `protobuf:"bytes,54,rep,name=repeated_string_piece,json=repeatedStringPiece,proto3" json:"repeated_string_piece,omitempty"` - RepeatedCord []string `protobuf:"bytes,55,rep,name=repeated_cord,json=repeatedCord,proto3" json:"repeated_cord,omitempty"` - // Map - MapInt32Int32 map[int32]int32 `protobuf:"bytes,56,rep,name=map_int32_int32,json=mapInt32Int32,proto3" json:"map_int32_int32,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MapInt64Int64 map[int64]int64 `protobuf:"bytes,57,rep,name=map_int64_int64,json=mapInt64Int64,proto3" json:"map_int64_int64,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MapUint32Uint32 map[uint32]uint32 `protobuf:"bytes,58,rep,name=map_uint32_uint32,json=mapUint32Uint32,proto3" json:"map_uint32_uint32,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MapUint64Uint64 map[uint64]uint64 `protobuf:"bytes,59,rep,name=map_uint64_uint64,json=mapUint64Uint64,proto3" json:"map_uint64_uint64,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MapSint32Sint32 map[int32]int32 `protobuf:"bytes,60,rep,name=map_sint32_sint32,json=mapSint32Sint32,proto3" json:"map_sint32_sint32,omitempty" protobuf_key:"zigzag32,1,opt,name=key,proto3" protobuf_val:"zigzag32,2,opt,name=value,proto3"` - MapSint64Sint64 map[int64]int64 
`protobuf:"bytes,61,rep,name=map_sint64_sint64,json=mapSint64Sint64,proto3" json:"map_sint64_sint64,omitempty" protobuf_key:"zigzag64,1,opt,name=key,proto3" protobuf_val:"zigzag64,2,opt,name=value,proto3"` - MapFixed32Fixed32 map[uint32]uint32 `protobuf:"bytes,62,rep,name=map_fixed32_fixed32,json=mapFixed32Fixed32,proto3" json:"map_fixed32_fixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"` - MapFixed64Fixed64 map[uint64]uint64 `protobuf:"bytes,63,rep,name=map_fixed64_fixed64,json=mapFixed64Fixed64,proto3" json:"map_fixed64_fixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - MapSfixed32Sfixed32 map[int32]int32 `protobuf:"bytes,64,rep,name=map_sfixed32_sfixed32,json=mapSfixed32Sfixed32,proto3" json:"map_sfixed32_sfixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"` - MapSfixed64Sfixed64 map[int64]int64 `protobuf:"bytes,65,rep,name=map_sfixed64_sfixed64,json=mapSfixed64Sfixed64,proto3" json:"map_sfixed64_sfixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - MapInt32Float map[int32]float32 `protobuf:"bytes,66,rep,name=map_int32_float,json=mapInt32Float,proto3" json:"map_int32_float,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"` - MapInt32Double map[int32]float64 `protobuf:"bytes,67,rep,name=map_int32_double,json=mapInt32Double,proto3" json:"map_int32_double,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - MapBoolBool map[bool]bool `protobuf:"bytes,68,rep,name=map_bool_bool,json=mapBoolBool,proto3" json:"map_bool_bool,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MapStringString map[string]string 
`protobuf:"bytes,69,rep,name=map_string_string,json=mapStringString,proto3" json:"map_string_string,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - MapStringBytes map[string][]byte `protobuf:"bytes,70,rep,name=map_string_bytes,json=mapStringBytes,proto3" json:"map_string_bytes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - MapStringNestedMessage map[string]*TestAllTypes_NestedMessage `protobuf:"bytes,71,rep,name=map_string_nested_message,json=mapStringNestedMessage,proto3" json:"map_string_nested_message,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - MapStringForeignMessage map[string]*ForeignMessage `protobuf:"bytes,72,rep,name=map_string_foreign_message,json=mapStringForeignMessage,proto3" json:"map_string_foreign_message,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - MapStringNestedEnum map[string]TestAllTypes_NestedEnum `protobuf:"bytes,73,rep,name=map_string_nested_enum,json=mapStringNestedEnum,proto3" json:"map_string_nested_enum,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=conformance.TestAllTypes_NestedEnum"` - MapStringForeignEnum map[string]ForeignEnum `protobuf:"bytes,74,rep,name=map_string_foreign_enum,json=mapStringForeignEnum,proto3" json:"map_string_foreign_enum,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=conformance.ForeignEnum"` - // Types that are valid to be assigned to OneofField: - // *TestAllTypes_OneofUint32 - // *TestAllTypes_OneofNestedMessage - // *TestAllTypes_OneofString - // *TestAllTypes_OneofBytes - OneofField isTestAllTypes_OneofField `protobuf_oneof:"oneof_field"` - // Well-known types - OptionalBoolWrapper *wrappers.BoolValue 
`protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper,proto3" json:"optional_bool_wrapper,omitempty"` - OptionalInt32Wrapper *wrappers.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper,proto3" json:"optional_int32_wrapper,omitempty"` - OptionalInt64Wrapper *wrappers.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper,proto3" json:"optional_int64_wrapper,omitempty"` - OptionalUint32Wrapper *wrappers.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper,proto3" json:"optional_uint32_wrapper,omitempty"` - OptionalUint64Wrapper *wrappers.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper,proto3" json:"optional_uint64_wrapper,omitempty"` - OptionalFloatWrapper *wrappers.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper,proto3" json:"optional_float_wrapper,omitempty"` - OptionalDoubleWrapper *wrappers.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper,proto3" json:"optional_double_wrapper,omitempty"` - OptionalStringWrapper *wrappers.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper,proto3" json:"optional_string_wrapper,omitempty"` - OptionalBytesWrapper *wrappers.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper,proto3" json:"optional_bytes_wrapper,omitempty"` - RepeatedBoolWrapper []*wrappers.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper,proto3" json:"repeated_bool_wrapper,omitempty"` - RepeatedInt32Wrapper []*wrappers.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper,proto3" json:"repeated_int32_wrapper,omitempty"` - RepeatedInt64Wrapper []*wrappers.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper,proto3" 
json:"repeated_int64_wrapper,omitempty"` - RepeatedUint32Wrapper []*wrappers.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper,proto3" json:"repeated_uint32_wrapper,omitempty"` - RepeatedUint64Wrapper []*wrappers.UInt64Value `protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper,proto3" json:"repeated_uint64_wrapper,omitempty"` - RepeatedFloatWrapper []*wrappers.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper,proto3" json:"repeated_float_wrapper,omitempty"` - RepeatedDoubleWrapper []*wrappers.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper,proto3" json:"repeated_double_wrapper,omitempty"` - RepeatedStringWrapper []*wrappers.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper,proto3" json:"repeated_string_wrapper,omitempty"` - RepeatedBytesWrapper []*wrappers.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper,proto3" json:"repeated_bytes_wrapper,omitempty"` - OptionalDuration *duration.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration,proto3" json:"optional_duration,omitempty"` - OptionalTimestamp *timestamp.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp,proto3" json:"optional_timestamp,omitempty"` - OptionalFieldMask *field_mask.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask,proto3" json:"optional_field_mask,omitempty"` - OptionalStruct *_struct.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct,proto3" json:"optional_struct,omitempty"` - OptionalAny *any.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny,proto3" json:"optional_any,omitempty"` - OptionalValue *_struct.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue,proto3" json:"optional_value,omitempty"` - RepeatedDuration 
[]*duration.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration,proto3" json:"repeated_duration,omitempty"` - RepeatedTimestamp []*timestamp.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp,proto3" json:"repeated_timestamp,omitempty"` - RepeatedFieldmask []*field_mask.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask,proto3" json:"repeated_fieldmask,omitempty"` - RepeatedStruct []*_struct.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct,proto3" json:"repeated_struct,omitempty"` - RepeatedAny []*any.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny,proto3" json:"repeated_any,omitempty"` - RepeatedValue []*_struct.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue,proto3" json:"repeated_value,omitempty"` - // Test field-name-to-JSON-name convention. - Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1,proto3" json:"fieldname1,omitempty"` - FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2,proto3" json:"field_name2,omitempty"` - XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=FieldName3,proto3" json:"_field_name3,omitempty"` - Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4,proto3" json:"field__name4_,omitempty"` - Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5,proto3" json:"field0name5,omitempty"` - Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6,proto3" json:"field_0_name6,omitempty"` - FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7,proto3" json:"fieldName7,omitempty"` - FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8,proto3" json:"FieldName8,omitempty"` - Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9,proto3" json:"field_Name9,omitempty"` - Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=FieldName10,proto3" 
json:"Field_Name10,omitempty"` - FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=FIELDNAME11,proto3" json:"FIELD_NAME11,omitempty"` - FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=FIELDName12,proto3" json:"FIELD_name12,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestAllTypes) Reset() { *m = TestAllTypes{} } -func (m *TestAllTypes) String() string { return proto.CompactTextString(m) } -func (*TestAllTypes) ProtoMessage() {} -func (*TestAllTypes) Descriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{2} -} -func (m *TestAllTypes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestAllTypes.Unmarshal(m, b) -} -func (m *TestAllTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestAllTypes.Marshal(b, m, deterministic) -} -func (dst *TestAllTypes) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestAllTypes.Merge(dst, src) -} -func (m *TestAllTypes) XXX_Size() int { - return xxx_messageInfo_TestAllTypes.Size(m) -} -func (m *TestAllTypes) XXX_DiscardUnknown() { - xxx_messageInfo_TestAllTypes.DiscardUnknown(m) -} - -var xxx_messageInfo_TestAllTypes proto.InternalMessageInfo - -func (m *TestAllTypes) GetOptionalInt32() int32 { - if m != nil { - return m.OptionalInt32 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalInt64() int64 { - if m != nil { - return m.OptionalInt64 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalUint32() uint32 { - if m != nil { - return m.OptionalUint32 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalUint64() uint64 { - if m != nil { - return m.OptionalUint64 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalSint32() int32 { - if m != nil { - return m.OptionalSint32 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalSint64() int64 { - if m != nil { - return m.OptionalSint64 - } - return 0 -} - -func 
(m *TestAllTypes) GetOptionalFixed32() uint32 { - if m != nil { - return m.OptionalFixed32 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalFixed64() uint64 { - if m != nil { - return m.OptionalFixed64 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalSfixed32() int32 { - if m != nil { - return m.OptionalSfixed32 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalSfixed64() int64 { - if m != nil { - return m.OptionalSfixed64 - } - return 0 -} - -func (m *TestAllTypes) GetOptionalFloat() float32 { - if m != nil { - return m.OptionalFloat - } - return 0 -} - -func (m *TestAllTypes) GetOptionalDouble() float64 { - if m != nil { - return m.OptionalDouble - } - return 0 -} - -func (m *TestAllTypes) GetOptionalBool() bool { - if m != nil { - return m.OptionalBool - } - return false -} - -func (m *TestAllTypes) GetOptionalString() string { - if m != nil { - return m.OptionalString - } - return "" -} - -func (m *TestAllTypes) GetOptionalBytes() []byte { - if m != nil { - return m.OptionalBytes - } - return nil -} - -func (m *TestAllTypes) GetOptionalNestedMessage() *TestAllTypes_NestedMessage { - if m != nil { - return m.OptionalNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetOptionalForeignMessage() *ForeignMessage { - if m != nil { - return m.OptionalForeignMessage - } - return nil -} - -func (m *TestAllTypes) GetOptionalNestedEnum() TestAllTypes_NestedEnum { - if m != nil { - return m.OptionalNestedEnum - } - return TestAllTypes_FOO -} - -func (m *TestAllTypes) GetOptionalForeignEnum() ForeignEnum { - if m != nil { - return m.OptionalForeignEnum - } - return ForeignEnum_FOREIGN_FOO -} - -func (m *TestAllTypes) GetOptionalStringPiece() string { - if m != nil { - return m.OptionalStringPiece - } - return "" -} - -func (m *TestAllTypes) GetOptionalCord() string { - if m != nil { - return m.OptionalCord - } - return "" -} - -func (m *TestAllTypes) GetRecursiveMessage() *TestAllTypes { - if m != nil { - return m.RecursiveMessage - } - return nil 
-} - -func (m *TestAllTypes) GetRepeatedInt32() []int32 { - if m != nil { - return m.RepeatedInt32 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedInt64() []int64 { - if m != nil { - return m.RepeatedInt64 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedUint32() []uint32 { - if m != nil { - return m.RepeatedUint32 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedUint64() []uint64 { - if m != nil { - return m.RepeatedUint64 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedSint32() []int32 { - if m != nil { - return m.RepeatedSint32 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedSint64() []int64 { - if m != nil { - return m.RepeatedSint64 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFixed32() []uint32 { - if m != nil { - return m.RepeatedFixed32 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFixed64() []uint64 { - if m != nil { - return m.RepeatedFixed64 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedSfixed32() []int32 { - if m != nil { - return m.RepeatedSfixed32 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedSfixed64() []int64 { - if m != nil { - return m.RepeatedSfixed64 - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFloat() []float32 { - if m != nil { - return m.RepeatedFloat - } - return nil -} - -func (m *TestAllTypes) GetRepeatedDouble() []float64 { - if m != nil { - return m.RepeatedDouble - } - return nil -} - -func (m *TestAllTypes) GetRepeatedBool() []bool { - if m != nil { - return m.RepeatedBool - } - return nil -} - -func (m *TestAllTypes) GetRepeatedString() []string { - if m != nil { - return m.RepeatedString - } - return nil -} - -func (m *TestAllTypes) GetRepeatedBytes() [][]byte { - if m != nil { - return m.RepeatedBytes - } - return nil -} - -func (m *TestAllTypes) GetRepeatedNestedMessage() []*TestAllTypes_NestedMessage { - if m != nil { - return m.RepeatedNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetRepeatedForeignMessage() 
[]*ForeignMessage { - if m != nil { - return m.RepeatedForeignMessage - } - return nil -} - -func (m *TestAllTypes) GetRepeatedNestedEnum() []TestAllTypes_NestedEnum { - if m != nil { - return m.RepeatedNestedEnum - } - return nil -} - -func (m *TestAllTypes) GetRepeatedForeignEnum() []ForeignEnum { - if m != nil { - return m.RepeatedForeignEnum - } - return nil -} - -func (m *TestAllTypes) GetRepeatedStringPiece() []string { - if m != nil { - return m.RepeatedStringPiece - } - return nil -} - -func (m *TestAllTypes) GetRepeatedCord() []string { - if m != nil { - return m.RepeatedCord - } - return nil -} - -func (m *TestAllTypes) GetMapInt32Int32() map[int32]int32 { - if m != nil { - return m.MapInt32Int32 - } - return nil -} - -func (m *TestAllTypes) GetMapInt64Int64() map[int64]int64 { - if m != nil { - return m.MapInt64Int64 - } - return nil -} - -func (m *TestAllTypes) GetMapUint32Uint32() map[uint32]uint32 { - if m != nil { - return m.MapUint32Uint32 - } - return nil -} - -func (m *TestAllTypes) GetMapUint64Uint64() map[uint64]uint64 { - if m != nil { - return m.MapUint64Uint64 - } - return nil -} - -func (m *TestAllTypes) GetMapSint32Sint32() map[int32]int32 { - if m != nil { - return m.MapSint32Sint32 - } - return nil -} - -func (m *TestAllTypes) GetMapSint64Sint64() map[int64]int64 { - if m != nil { - return m.MapSint64Sint64 - } - return nil -} - -func (m *TestAllTypes) GetMapFixed32Fixed32() map[uint32]uint32 { - if m != nil { - return m.MapFixed32Fixed32 - } - return nil -} - -func (m *TestAllTypes) GetMapFixed64Fixed64() map[uint64]uint64 { - if m != nil { - return m.MapFixed64Fixed64 - } - return nil -} - -func (m *TestAllTypes) GetMapSfixed32Sfixed32() map[int32]int32 { - if m != nil { - return m.MapSfixed32Sfixed32 - } - return nil -} - -func (m *TestAllTypes) GetMapSfixed64Sfixed64() map[int64]int64 { - if m != nil { - return m.MapSfixed64Sfixed64 - } - return nil -} - -func (m *TestAllTypes) GetMapInt32Float() map[int32]float32 { - if m != nil { - 
return m.MapInt32Float - } - return nil -} - -func (m *TestAllTypes) GetMapInt32Double() map[int32]float64 { - if m != nil { - return m.MapInt32Double - } - return nil -} - -func (m *TestAllTypes) GetMapBoolBool() map[bool]bool { - if m != nil { - return m.MapBoolBool - } - return nil -} - -func (m *TestAllTypes) GetMapStringString() map[string]string { - if m != nil { - return m.MapStringString - } - return nil -} - -func (m *TestAllTypes) GetMapStringBytes() map[string][]byte { - if m != nil { - return m.MapStringBytes - } - return nil -} - -func (m *TestAllTypes) GetMapStringNestedMessage() map[string]*TestAllTypes_NestedMessage { - if m != nil { - return m.MapStringNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetMapStringForeignMessage() map[string]*ForeignMessage { - if m != nil { - return m.MapStringForeignMessage - } - return nil -} - -func (m *TestAllTypes) GetMapStringNestedEnum() map[string]TestAllTypes_NestedEnum { - if m != nil { - return m.MapStringNestedEnum - } - return nil -} - -func (m *TestAllTypes) GetMapStringForeignEnum() map[string]ForeignEnum { - if m != nil { - return m.MapStringForeignEnum - } - return nil -} - -type isTestAllTypes_OneofField interface { - isTestAllTypes_OneofField() -} - -type TestAllTypes_OneofUint32 struct { - OneofUint32 uint32 `protobuf:"varint,111,opt,name=oneof_uint32,json=oneofUint32,proto3,oneof"` -} - -type TestAllTypes_OneofNestedMessage struct { - OneofNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,112,opt,name=oneof_nested_message,json=oneofNestedMessage,proto3,oneof"` -} - -type TestAllTypes_OneofString struct { - OneofString string `protobuf:"bytes,113,opt,name=oneof_string,json=oneofString,proto3,oneof"` -} - -type TestAllTypes_OneofBytes struct { - OneofBytes []byte `protobuf:"bytes,114,opt,name=oneof_bytes,json=oneofBytes,proto3,oneof"` -} - -func (*TestAllTypes_OneofUint32) isTestAllTypes_OneofField() {} - -func (*TestAllTypes_OneofNestedMessage) isTestAllTypes_OneofField() {} - 
-func (*TestAllTypes_OneofString) isTestAllTypes_OneofField() {} - -func (*TestAllTypes_OneofBytes) isTestAllTypes_OneofField() {} - -func (m *TestAllTypes) GetOneofField() isTestAllTypes_OneofField { - if m != nil { - return m.OneofField - } - return nil -} - -func (m *TestAllTypes) GetOneofUint32() uint32 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint32); ok { - return x.OneofUint32 - } - return 0 -} - -func (m *TestAllTypes) GetOneofNestedMessage() *TestAllTypes_NestedMessage { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofNestedMessage); ok { - return x.OneofNestedMessage - } - return nil -} - -func (m *TestAllTypes) GetOneofString() string { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofString); ok { - return x.OneofString - } - return "" -} - -func (m *TestAllTypes) GetOneofBytes() []byte { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofBytes); ok { - return x.OneofBytes - } - return nil -} - -func (m *TestAllTypes) GetOptionalBoolWrapper() *wrappers.BoolValue { - if m != nil { - return m.OptionalBoolWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalInt32Wrapper() *wrappers.Int32Value { - if m != nil { - return m.OptionalInt32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalInt64Wrapper() *wrappers.Int64Value { - if m != nil { - return m.OptionalInt64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalUint32Wrapper() *wrappers.UInt32Value { - if m != nil { - return m.OptionalUint32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalUint64Wrapper() *wrappers.UInt64Value { - if m != nil { - return m.OptionalUint64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalFloatWrapper() *wrappers.FloatValue { - if m != nil { - return m.OptionalFloatWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalDoubleWrapper() *wrappers.DoubleValue { - if m != nil { - return m.OptionalDoubleWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalStringWrapper() 
*wrappers.StringValue { - if m != nil { - return m.OptionalStringWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalBytesWrapper() *wrappers.BytesValue { - if m != nil { - return m.OptionalBytesWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedBoolWrapper() []*wrappers.BoolValue { - if m != nil { - return m.RepeatedBoolWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*wrappers.Int32Value { - if m != nil { - return m.RepeatedInt32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*wrappers.Int64Value { - if m != nil { - return m.RepeatedInt64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*wrappers.UInt32Value { - if m != nil { - return m.RepeatedUint32Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*wrappers.UInt64Value { - if m != nil { - return m.RepeatedUint64Wrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFloatWrapper() []*wrappers.FloatValue { - if m != nil { - return m.RepeatedFloatWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*wrappers.DoubleValue { - if m != nil { - return m.RepeatedDoubleWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedStringWrapper() []*wrappers.StringValue { - if m != nil { - return m.RepeatedStringWrapper - } - return nil -} - -func (m *TestAllTypes) GetRepeatedBytesWrapper() []*wrappers.BytesValue { - if m != nil { - return m.RepeatedBytesWrapper - } - return nil -} - -func (m *TestAllTypes) GetOptionalDuration() *duration.Duration { - if m != nil { - return m.OptionalDuration - } - return nil -} - -func (m *TestAllTypes) GetOptionalTimestamp() *timestamp.Timestamp { - if m != nil { - return m.OptionalTimestamp - } - return nil -} - -func (m *TestAllTypes) GetOptionalFieldMask() *field_mask.FieldMask { - if m != nil { - return m.OptionalFieldMask - } - return nil -} - -func (m *TestAllTypes) 
GetOptionalStruct() *_struct.Struct { - if m != nil { - return m.OptionalStruct - } - return nil -} - -func (m *TestAllTypes) GetOptionalAny() *any.Any { - if m != nil { - return m.OptionalAny - } - return nil -} - -func (m *TestAllTypes) GetOptionalValue() *_struct.Value { - if m != nil { - return m.OptionalValue - } - return nil -} - -func (m *TestAllTypes) GetRepeatedDuration() []*duration.Duration { - if m != nil { - return m.RepeatedDuration - } - return nil -} - -func (m *TestAllTypes) GetRepeatedTimestamp() []*timestamp.Timestamp { - if m != nil { - return m.RepeatedTimestamp - } - return nil -} - -func (m *TestAllTypes) GetRepeatedFieldmask() []*field_mask.FieldMask { - if m != nil { - return m.RepeatedFieldmask - } - return nil -} - -func (m *TestAllTypes) GetRepeatedStruct() []*_struct.Struct { - if m != nil { - return m.RepeatedStruct - } - return nil -} - -func (m *TestAllTypes) GetRepeatedAny() []*any.Any { - if m != nil { - return m.RepeatedAny - } - return nil -} - -func (m *TestAllTypes) GetRepeatedValue() []*_struct.Value { - if m != nil { - return m.RepeatedValue - } - return nil -} - -func (m *TestAllTypes) GetFieldname1() int32 { - if m != nil { - return m.Fieldname1 - } - return 0 -} - -func (m *TestAllTypes) GetFieldName2() int32 { - if m != nil { - return m.FieldName2 - } - return 0 -} - -func (m *TestAllTypes) GetXFieldName3() int32 { - if m != nil { - return m.XFieldName3 - } - return 0 -} - -func (m *TestAllTypes) GetField_Name4_() int32 { - if m != nil { - return m.Field_Name4_ - } - return 0 -} - -func (m *TestAllTypes) GetField0Name5() int32 { - if m != nil { - return m.Field0Name5 - } - return 0 -} - -func (m *TestAllTypes) GetField_0Name6() int32 { - if m != nil { - return m.Field_0Name6 - } - return 0 -} - -func (m *TestAllTypes) GetFieldName7() int32 { - if m != nil { - return m.FieldName7 - } - return 0 -} - -func (m *TestAllTypes) GetFieldName8() int32 { - if m != nil { - return m.FieldName8 - } - return 0 -} - -func (m 
*TestAllTypes) GetField_Name9() int32 { - if m != nil { - return m.Field_Name9 - } - return 0 -} - -func (m *TestAllTypes) GetField_Name10() int32 { - if m != nil { - return m.Field_Name10 - } - return 0 -} - -func (m *TestAllTypes) GetFIELD_NAME11() int32 { - if m != nil { - return m.FIELD_NAME11 - } - return 0 -} - -func (m *TestAllTypes) GetFIELDName12() int32 { - if m != nil { - return m.FIELDName12 - } - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _TestAllTypes_OneofMarshaler, _TestAllTypes_OneofUnmarshaler, _TestAllTypes_OneofSizer, []interface{}{ - (*TestAllTypes_OneofUint32)(nil), - (*TestAllTypes_OneofNestedMessage)(nil), - (*TestAllTypes_OneofString)(nil), - (*TestAllTypes_OneofBytes)(nil), - } -} - -func _TestAllTypes_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*TestAllTypes) - // oneof_field - switch x := m.OneofField.(type) { - case *TestAllTypes_OneofUint32: - b.EncodeVarint(111<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.OneofUint32)) - case *TestAllTypes_OneofNestedMessage: - b.EncodeVarint(112<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.OneofNestedMessage); err != nil { - return err - } - case *TestAllTypes_OneofString: - b.EncodeVarint(113<<3 | proto.WireBytes) - b.EncodeStringBytes(x.OneofString) - case *TestAllTypes_OneofBytes: - b.EncodeVarint(114<<3 | proto.WireBytes) - b.EncodeRawBytes(x.OneofBytes) - case nil: - default: - return fmt.Errorf("TestAllTypes.OneofField has unexpected type %T", x) - } - return nil -} - -func _TestAllTypes_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*TestAllTypes) - switch tag { - case 111: // oneof_field.oneof_uint32 - if wire != proto.WireVarint { - return true, 
proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofUint32{uint32(x)} - return true, err - case 112: // oneof_field.oneof_nested_message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TestAllTypes_NestedMessage) - err := b.DecodeMessage(msg) - m.OneofField = &TestAllTypes_OneofNestedMessage{msg} - return true, err - case 113: // oneof_field.oneof_string - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.OneofField = &TestAllTypes_OneofString{x} - return true, err - case 114: // oneof_field.oneof_bytes - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.OneofField = &TestAllTypes_OneofBytes{x} - return true, err - default: - return false, nil - } -} - -func _TestAllTypes_OneofSizer(msg proto.Message) (n int) { - m := msg.(*TestAllTypes) - // oneof_field - switch x := m.OneofField.(type) { - case *TestAllTypes_OneofUint32: - n += 2 // tag and wire - n += proto.SizeVarint(uint64(x.OneofUint32)) - case *TestAllTypes_OneofNestedMessage: - s := proto.Size(x.OneofNestedMessage) - n += 2 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *TestAllTypes_OneofString: - n += 2 // tag and wire - n += proto.SizeVarint(uint64(len(x.OneofString))) - n += len(x.OneofString) - case *TestAllTypes_OneofBytes: - n += 2 // tag and wire - n += proto.SizeVarint(uint64(len(x.OneofBytes))) - n += len(x.OneofBytes) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type TestAllTypes_NestedMessage struct { - A int32 `protobuf:"varint,1,opt,name=a,proto3" json:"a,omitempty"` - Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive,proto3" json:"corecursive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} } -func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) } -func (*TestAllTypes_NestedMessage) ProtoMessage() {} -func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{2, 0} -} -func (m *TestAllTypes_NestedMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestAllTypes_NestedMessage.Unmarshal(m, b) -} -func (m *TestAllTypes_NestedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestAllTypes_NestedMessage.Marshal(b, m, deterministic) -} -func (dst *TestAllTypes_NestedMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestAllTypes_NestedMessage.Merge(dst, src) -} -func (m *TestAllTypes_NestedMessage) XXX_Size() int { - return xxx_messageInfo_TestAllTypes_NestedMessage.Size(m) -} -func (m *TestAllTypes_NestedMessage) XXX_DiscardUnknown() { - xxx_messageInfo_TestAllTypes_NestedMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_TestAllTypes_NestedMessage proto.InternalMessageInfo - -func (m *TestAllTypes_NestedMessage) GetA() int32 { - if m != nil { - return m.A - } - return 0 -} - -func (m *TestAllTypes_NestedMessage) GetCorecursive() *TestAllTypes { - if m != nil { - return m.Corecursive - } - return nil -} - -type ForeignMessage struct { - C int32 `protobuf:"varint,1,opt,name=c,proto3" json:"c,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForeignMessage) Reset() { *m = ForeignMessage{} } -func (m *ForeignMessage) String() string { return proto.CompactTextString(m) } -func (*ForeignMessage) ProtoMessage() {} -func (*ForeignMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_conformance_48ac832451f5d6c3, []int{3} -} -func (m *ForeignMessage) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_ForeignMessage.Unmarshal(m, b) -} -func (m *ForeignMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForeignMessage.Marshal(b, m, deterministic) -} -func (dst *ForeignMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForeignMessage.Merge(dst, src) -} -func (m *ForeignMessage) XXX_Size() int { - return xxx_messageInfo_ForeignMessage.Size(m) -} -func (m *ForeignMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ForeignMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ForeignMessage proto.InternalMessageInfo - -func (m *ForeignMessage) GetC() int32 { - if m != nil { - return m.C - } - return 0 -} - -func init() { - proto.RegisterType((*ConformanceRequest)(nil), "conformance.ConformanceRequest") - proto.RegisterType((*ConformanceResponse)(nil), "conformance.ConformanceResponse") - proto.RegisterType((*TestAllTypes)(nil), "conformance.TestAllTypes") - proto.RegisterMapType((map[bool]bool)(nil), "conformance.TestAllTypes.MapBoolBoolEntry") - proto.RegisterMapType((map[uint32]uint32)(nil), "conformance.TestAllTypes.MapFixed32Fixed32Entry") - proto.RegisterMapType((map[uint64]uint64)(nil), "conformance.TestAllTypes.MapFixed64Fixed64Entry") - proto.RegisterMapType((map[int32]float64)(nil), "conformance.TestAllTypes.MapInt32DoubleEntry") - proto.RegisterMapType((map[int32]float32)(nil), "conformance.TestAllTypes.MapInt32FloatEntry") - proto.RegisterMapType((map[int32]int32)(nil), "conformance.TestAllTypes.MapInt32Int32Entry") - proto.RegisterMapType((map[int64]int64)(nil), "conformance.TestAllTypes.MapInt64Int64Entry") - proto.RegisterMapType((map[int32]int32)(nil), "conformance.TestAllTypes.MapSfixed32Sfixed32Entry") - proto.RegisterMapType((map[int64]int64)(nil), "conformance.TestAllTypes.MapSfixed64Sfixed64Entry") - proto.RegisterMapType((map[int32]int32)(nil), "conformance.TestAllTypes.MapSint32Sint32Entry") - proto.RegisterMapType((map[int64]int64)(nil), "conformance.TestAllTypes.MapSint64Sint64Entry") 
- proto.RegisterMapType((map[string][]byte)(nil), "conformance.TestAllTypes.MapStringBytesEntry") - proto.RegisterMapType((map[string]ForeignEnum)(nil), "conformance.TestAllTypes.MapStringForeignEnumEntry") - proto.RegisterMapType((map[string]*ForeignMessage)(nil), "conformance.TestAllTypes.MapStringForeignMessageEntry") - proto.RegisterMapType((map[string]TestAllTypes_NestedEnum)(nil), "conformance.TestAllTypes.MapStringNestedEnumEntry") - proto.RegisterMapType((map[string]*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.MapStringNestedMessageEntry") - proto.RegisterMapType((map[string]string)(nil), "conformance.TestAllTypes.MapStringStringEntry") - proto.RegisterMapType((map[uint32]uint32)(nil), "conformance.TestAllTypes.MapUint32Uint32Entry") - proto.RegisterMapType((map[uint64]uint64)(nil), "conformance.TestAllTypes.MapUint64Uint64Entry") - proto.RegisterType((*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.NestedMessage") - proto.RegisterType((*ForeignMessage)(nil), "conformance.ForeignMessage") - proto.RegisterEnum("conformance.WireFormat", WireFormat_name, WireFormat_value) - proto.RegisterEnum("conformance.ForeignEnum", ForeignEnum_name, ForeignEnum_value) - proto.RegisterEnum("conformance.TestAllTypes_NestedEnum", TestAllTypes_NestedEnum_name, TestAllTypes_NestedEnum_value) -} - -func init() { proto.RegisterFile("conformance.proto", fileDescriptor_conformance_48ac832451f5d6c3) } - -var fileDescriptor_conformance_48ac832451f5d6c3 = []byte{ - // 2600 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0x5b, 0x73, 0x13, 0xc9, - 0x15, 0xf6, 0x68, 0xc0, 0x36, 0x2d, 0xd9, 0x96, 0xdb, 0xb7, 0xc6, 0x50, 0xcb, 0x60, 0x96, 0x20, - 0x60, 0xd7, 0xeb, 0xcb, 0x30, 0x5c, 0x36, 0x4b, 0xb0, 0xc0, 0x02, 0x93, 0xc5, 0xa2, 0xc6, 0x78, - 0xa9, 0x22, 0x0f, 0xca, 0x20, 0x8f, 0x5d, 0x5a, 0x24, 0x8d, 0x76, 0x66, 0xb4, 0x89, 0xf3, 0x98, - 0x7f, 0x90, 0xfb, 0xf5, 0x2f, 0xe4, 0x5a, 0x95, 0x4a, 0x52, 
0xc9, 0x53, 0x2a, 0x2f, 0xb9, 0x27, - 0x95, 0x7b, 0xf2, 0x63, 0x92, 0xea, 0xeb, 0x74, 0xb7, 0x7a, 0x64, 0xb1, 0x55, 0x2b, 0x5b, 0xa7, - 0xbf, 0xfe, 0xce, 0xe9, 0xd3, 0x67, 0xbe, 0x76, 0x9f, 0x01, 0xcc, 0x36, 0xa3, 0xee, 0x61, 0x14, - 0x77, 0x82, 0x6e, 0x33, 0x5c, 0xed, 0xc5, 0x51, 0x1a, 0xc1, 0xa2, 0x64, 0x5a, 0x3e, 0x7b, 0x14, - 0x45, 0x47, 0xed, 0xf0, 0x1d, 0x32, 0xf4, 0xb2, 0x7f, 0xf8, 0x4e, 0xd0, 0x3d, 0xa6, 0xb8, 0xe5, - 0x37, 0xf4, 0xa1, 0x83, 0x7e, 0x1c, 0xa4, 0xad, 0xa8, 0xcb, 0xc6, 0x1d, 0x7d, 0xfc, 0xb0, 0x15, - 0xb6, 0x0f, 0x1a, 0x9d, 0x20, 0x79, 0xc5, 0x10, 0xe7, 0x75, 0x44, 0x92, 0xc6, 0xfd, 0x66, 0xca, - 0x46, 0x2f, 0xe8, 0xa3, 0x69, 0xab, 0x13, 0x26, 0x69, 0xd0, 0xe9, 0xe5, 0x05, 0xf0, 0xb9, 0x38, - 0xe8, 0xf5, 0xc2, 0x38, 0xa1, 0xe3, 0x2b, 0xbf, 0xb2, 0x00, 0xbc, 0x9f, 0xad, 0xc5, 0x0f, 0x3f, - 0xea, 0x87, 0x49, 0x0a, 0xaf, 0x83, 0x32, 0x9f, 0xd1, 0xe8, 0x05, 0xc7, 0xed, 0x28, 0x38, 0x40, - 0x96, 0x63, 0x55, 0x4a, 0x8f, 0xc6, 0xfc, 0x19, 0x3e, 0xf2, 0x94, 0x0e, 0xc0, 0x4b, 0xa0, 0xf4, - 0x61, 0x12, 0x75, 0x05, 0xb0, 0xe0, 0x58, 0x95, 0x33, 0x8f, 0xc6, 0xfc, 0x22, 0xb6, 0x72, 0x50, - 0x1d, 0x2c, 0xc5, 0x94, 0x3c, 0x3c, 0x68, 0x44, 0xfd, 0xb4, 0xd7, 0x4f, 0x1b, 0xc4, 0x6b, 0x8a, - 0x6c, 0xc7, 0xaa, 0x4c, 0x6f, 0x2c, 0xad, 0xca, 0x69, 0x7e, 0xde, 0x8a, 0xc3, 0x1a, 0x19, 0xf6, - 0x17, 0xc4, 0xbc, 0x3a, 0x99, 0x46, 0xcd, 0xd5, 0x33, 0x60, 0x82, 0x39, 0x5c, 0xf9, 0x62, 0x01, - 0xcc, 0x29, 0x8b, 0x48, 0x7a, 0x51, 0x37, 0x09, 0xe1, 0x45, 0x50, 0xec, 0x05, 0x71, 0x12, 0x36, - 0xc2, 0x38, 0x8e, 0x62, 0xb2, 0x00, 0x1c, 0x17, 0x20, 0xc6, 0x6d, 0x6c, 0x83, 0x57, 0xc1, 0x4c, - 0x12, 0xc6, 0xad, 0xa0, 0xdd, 0xfa, 0x02, 0x87, 0x8d, 0x33, 0xd8, 0xb4, 0x18, 0xa0, 0xd0, 0xcb, - 0x60, 0x2a, 0xee, 0x77, 0x71, 0x82, 0x19, 0x90, 0xaf, 0xb3, 0xc4, 0xcc, 0x14, 0x66, 0x4a, 0x9d, - 0x3d, 0x6a, 0xea, 0x4e, 0x99, 0x52, 0xb7, 0x0c, 0x26, 0x92, 0x57, 0xad, 0x5e, 0x2f, 0x3c, 0x40, - 0xa7, 0xd9, 0x38, 0x37, 0x54, 0x27, 0xc1, 0x78, 0x1c, 0x26, 0xfd, 0x76, 0xba, 0xf2, 0x93, 0xfb, - 
0xa0, 0xf4, 0x2c, 0x4c, 0xd2, 0xad, 0x76, 0xfb, 0xd9, 0x71, 0x2f, 0x4c, 0xe0, 0x65, 0x30, 0x1d, - 0xf5, 0x70, 0xad, 0x05, 0xed, 0x46, 0xab, 0x9b, 0x6e, 0x6e, 0x90, 0x04, 0x9c, 0xf6, 0xa7, 0xb8, - 0x75, 0x07, 0x1b, 0x75, 0x98, 0xe7, 0x92, 0x75, 0xd9, 0x0a, 0xcc, 0x73, 0xe1, 0x15, 0x30, 0x23, - 0x60, 0x7d, 0x4a, 0x87, 0x57, 0x35, 0xe5, 0x8b, 0xd9, 0xfb, 0xc4, 0x3a, 0x00, 0xf4, 0x5c, 0xb2, - 0xaa, 0x53, 0x2a, 0x50, 0x63, 0x4c, 0x28, 0x23, 0x5e, 0xde, 0x6c, 0x06, 0xdc, 0x1b, 0x64, 0x4c, - 0x28, 0x23, 0xde, 0x23, 0xa8, 0x02, 0x3d, 0x17, 0x5e, 0x05, 0x65, 0x01, 0x3c, 0x6c, 0x7d, 0x3e, - 0x3c, 0xd8, 0xdc, 0x40, 0x13, 0x8e, 0x55, 0x99, 0xf0, 0x05, 0x41, 0x8d, 0x9a, 0x07, 0xa1, 0x9e, - 0x8b, 0x26, 0x1d, 0xab, 0x32, 0xae, 0x41, 0x3d, 0x17, 0x5e, 0x07, 0xb3, 0x99, 0x7b, 0x4e, 0x7b, - 0xc6, 0xb1, 0x2a, 0x33, 0xbe, 0xe0, 0xd8, 0x63, 0x76, 0x03, 0xd8, 0x73, 0x11, 0x70, 0xac, 0x4a, - 0x59, 0x07, 0x7b, 0xae, 0x92, 0xfa, 0xc3, 0x76, 0x14, 0xa4, 0xa8, 0xe8, 0x58, 0x95, 0x42, 0x96, - 0xfa, 0x1a, 0x36, 0x2a, 0xeb, 0x3f, 0x88, 0xfa, 0x2f, 0xdb, 0x21, 0x2a, 0x39, 0x56, 0xc5, 0xca, - 0xd6, 0xff, 0x80, 0x58, 0xe1, 0x25, 0x20, 0x66, 0x36, 0x5e, 0x46, 0x51, 0x1b, 0x4d, 0x39, 0x56, - 0x65, 0xd2, 0x2f, 0x71, 0x63, 0x35, 0x8a, 0xda, 0x6a, 0x36, 0xd3, 0xb8, 0xd5, 0x3d, 0x42, 0xd3, - 0xb8, 0xaa, 0xa4, 0x6c, 0x12, 0xab, 0x12, 0xdd, 0xcb, 0xe3, 0x34, 0x4c, 0xd0, 0x0c, 0x2e, 0xe3, - 0x2c, 0xba, 0x2a, 0x36, 0xc2, 0x06, 0x58, 0x12, 0xb0, 0x2e, 0x7d, 0xbc, 0x3b, 0x61, 0x92, 0x04, - 0x47, 0x21, 0x82, 0x8e, 0x55, 0x29, 0x6e, 0x5c, 0x51, 0x1e, 0x6c, 0xb9, 0x44, 0x57, 0x77, 0x09, - 0xfe, 0x09, 0x85, 0xfb, 0x0b, 0x9c, 0x47, 0x31, 0xc3, 0x7d, 0x80, 0xb2, 0x2c, 0x45, 0x71, 0xd8, - 0x3a, 0xea, 0x0a, 0x0f, 0x73, 0xc4, 0xc3, 0x39, 0xc5, 0x43, 0x8d, 0x62, 0x38, 0xeb, 0xa2, 0x48, - 0xa6, 0x62, 0x87, 0x1f, 0x80, 0x79, 0x3d, 0xee, 0xb0, 0xdb, 0xef, 0xa0, 0x05, 0xa2, 0x46, 0x6f, - 0x9e, 0x14, 0xf4, 0x76, 0xb7, 0xdf, 0xf1, 0xa1, 0x1a, 0x31, 0xb6, 0xc1, 0xf7, 0xc1, 0xc2, 0x40, - 0xb8, 0x84, 0x78, 0x91, 0x10, 0x23, 
0x53, 0xac, 0x84, 0x6c, 0x4e, 0x0b, 0x94, 0xb0, 0x79, 0x12, - 0x1b, 0xdd, 0xad, 0x46, 0xaf, 0x15, 0x36, 0x43, 0x84, 0xf0, 0x9e, 0x55, 0x0b, 0x93, 0x85, 0x6c, - 0x1e, 0xdd, 0xb7, 0xa7, 0x78, 0x18, 0x5e, 0x91, 0x4a, 0xa1, 0x19, 0xc5, 0x07, 0xe8, 0x2c, 0xc3, - 0x5b, 0x59, 0x39, 0xdc, 0x8f, 0xe2, 0x03, 0x58, 0x03, 0xb3, 0x71, 0xd8, 0xec, 0xc7, 0x49, 0xeb, - 0xe3, 0x50, 0xa4, 0xf5, 0x1c, 0x49, 0xeb, 0xd9, 0xdc, 0x1c, 0xf8, 0x65, 0x31, 0x87, 0xa7, 0xf3, - 0x32, 0x98, 0x8e, 0xc3, 0x5e, 0x18, 0xe0, 0x3c, 0xd2, 0x87, 0xf9, 0x82, 0x63, 0x63, 0xb5, 0xe1, - 0x56, 0xa1, 0x36, 0x32, 0xcc, 0x73, 0x91, 0xe3, 0xd8, 0x58, 0x6d, 0x24, 0x18, 0xd5, 0x06, 0x01, - 0x63, 0x6a, 0x73, 0xd1, 0xb1, 0xb1, 0xda, 0x70, 0x73, 0xa6, 0x36, 0x0a, 0xd0, 0x73, 0xd1, 0x8a, - 0x63, 0x63, 0xb5, 0x91, 0x81, 0x1a, 0x23, 0x53, 0x9b, 0x4b, 0x8e, 0x8d, 0xd5, 0x86, 0x9b, 0xf7, - 0x06, 0x19, 0x99, 0xda, 0xbc, 0xe9, 0xd8, 0x58, 0x6d, 0x64, 0x20, 0x55, 0x1b, 0x01, 0xe4, 0xb2, - 0x70, 0xd9, 0xb1, 0xb1, 0xda, 0x70, 0xbb, 0xa4, 0x36, 0x2a, 0xd4, 0x73, 0xd1, 0x27, 0x1c, 0x1b, - 0xab, 0x8d, 0x02, 0xa5, 0x6a, 0x93, 0xb9, 0xe7, 0xb4, 0x57, 0x1c, 0x1b, 0xab, 0x8d, 0x08, 0x40, - 0x52, 0x1b, 0x0d, 0xec, 0xb9, 0xa8, 0xe2, 0xd8, 0x58, 0x6d, 0x54, 0x30, 0x55, 0x9b, 0x2c, 0x08, - 0xa2, 0x36, 0x57, 0x1d, 0x1b, 0xab, 0x8d, 0x08, 0x81, 0xab, 0x8d, 0x80, 0x31, 0xb5, 0xb9, 0xe6, - 0xd8, 0x58, 0x6d, 0xb8, 0x39, 0x53, 0x1b, 0x01, 0x24, 0x6a, 0x73, 0xdd, 0xb1, 0xb1, 0xda, 0x70, - 0x23, 0x57, 0x9b, 0x2c, 0x42, 0xaa, 0x36, 0x6f, 0x39, 0x36, 0x56, 0x1b, 0x11, 0x9f, 0x50, 0x9b, - 0x8c, 0x8d, 0xa8, 0xcd, 0xdb, 0x8e, 0x8d, 0xd5, 0x46, 0xd0, 0x71, 0xb5, 0x11, 0x30, 0x4d, 0x6d, - 0xd6, 0x1c, 0xfb, 0xb5, 0xd4, 0x86, 0xf3, 0x0c, 0xa8, 0x4d, 0x96, 0x25, 0x4d, 0x6d, 0xd6, 0x89, - 0x87, 0xe1, 0x6a, 0x23, 0x92, 0x39, 0xa0, 0x36, 0x7a, 0xdc, 0x44, 0x14, 0x36, 0x1d, 0x7b, 0x74, - 0xb5, 0x51, 0x23, 0xe6, 0x6a, 0x33, 0x10, 0x2e, 0x21, 0x76, 0x09, 0xf1, 0x10, 0xb5, 0xd1, 0x02, - 0xe5, 0x6a, 0xa3, 0xed, 0x16, 0x53, 0x1b, 0x0f, 0xef, 0x19, 0x55, 0x1b, 
0x75, 0xdf, 0x84, 0xda, - 0x88, 0x79, 0x44, 0x6d, 0x6e, 0x32, 0xbc, 0x95, 0x95, 0x03, 0x51, 0x9b, 0x67, 0x60, 0xa6, 0x13, - 0xf4, 0xa8, 0x40, 0x30, 0x99, 0xb8, 0x45, 0x92, 0xfa, 0x56, 0x7e, 0x06, 0x9e, 0x04, 0x3d, 0xa2, - 0x1d, 0xe4, 0x63, 0xbb, 0x9b, 0xc6, 0xc7, 0xfe, 0x54, 0x47, 0xb6, 0x49, 0xac, 0x9e, 0xcb, 0x54, - 0xe5, 0xf6, 0x68, 0xac, 0x9e, 0x4b, 0x3e, 0x14, 0x56, 0x66, 0x83, 0x2f, 0xc0, 0x2c, 0x66, 0xa5, - 0xf2, 0xc3, 0x55, 0xe8, 0x0e, 0xe1, 0x5d, 0x1d, 0xca, 0x4b, 0xa5, 0x89, 0x7e, 0x52, 0x66, 0x1c, - 0x9e, 0x6c, 0x95, 0xb9, 0x3d, 0x97, 0x0b, 0xd7, 0xbb, 0x23, 0x72, 0x7b, 0x2e, 0xfd, 0x54, 0xb9, - 0xb9, 0x95, 0x73, 0x53, 0x91, 0xe3, 0x5a, 0xf7, 0xc9, 0x11, 0xb8, 0xa9, 0x00, 0xee, 0x69, 0x71, - 0xcb, 0x56, 0x99, 0xdb, 0x73, 0xb9, 0x3c, 0xbe, 0x37, 0x22, 0xb7, 0xe7, 0xee, 0x69, 0x71, 0xcb, - 0x56, 0xf8, 0x59, 0x30, 0x87, 0xb9, 0x99, 0xb6, 0x09, 0x49, 0xbd, 0x4b, 0xd8, 0xd7, 0x86, 0xb2, - 0x33, 0x9d, 0x65, 0x3f, 0x28, 0x3f, 0x0e, 0x54, 0xb5, 0x2b, 0x1e, 0x3c, 0x57, 0x28, 0xf1, 0xa7, - 0x46, 0xf5, 0xe0, 0xb9, 0xec, 0x87, 0xe6, 0x41, 0xd8, 0xe1, 0x21, 0x58, 0x20, 0xf9, 0xe1, 0x8b, - 0x10, 0x0a, 0x7e, 0x8f, 0xf8, 0xd8, 0x18, 0x9e, 0x23, 0x06, 0xe6, 0x3f, 0xa9, 0x17, 0x1c, 0xb2, - 0x3e, 0xa2, 0xfa, 0xc1, 0x3b, 0xc1, 0xd7, 0xb2, 0x35, 0xb2, 0x1f, 0xcf, 0xe5, 0x3f, 0x75, 0x3f, - 0xd9, 0x88, 0xfa, 0xbc, 0xd2, 0x43, 0xa3, 0x3a, 0xea, 0xf3, 0x4a, 0x8e, 0x13, 0xed, 0x79, 0xa5, - 0x47, 0xcc, 0x73, 0x50, 0xce, 0x58, 0xd9, 0x19, 0x73, 0x9f, 0xd0, 0xbe, 0x7d, 0x32, 0x2d, 0x3d, - 0x7d, 0x28, 0xef, 0x74, 0x47, 0x31, 0xc2, 0x5d, 0x80, 0x3d, 0x91, 0xd3, 0x88, 0x1e, 0x49, 0x0f, - 0x08, 0xeb, 0xb5, 0xa1, 0xac, 0xf8, 0x9c, 0xc2, 0xff, 0x53, 0xca, 0x62, 0x27, 0xb3, 0x88, 0x72, - 0xa7, 0x52, 0xc8, 0xce, 0xaf, 0xed, 0x51, 0xca, 0x9d, 0x40, 0xe9, 0xa7, 0x54, 0xee, 0x92, 0x95, - 0x27, 0x81, 0x71, 0xd3, 0x23, 0xaf, 0x36, 0x42, 0x12, 0xe8, 0x74, 0x72, 0x1a, 0x66, 0x49, 0x90, - 0x8c, 0xb0, 0x07, 0xce, 0x4a, 0xc4, 0xda, 0x21, 0xf9, 0x90, 0x78, 0xb8, 0x31, 0x82, 0x07, 0xe5, - 0x58, 0xa4, 
0x9e, 0x16, 0x3b, 0xc6, 0x41, 0x98, 0x80, 0x65, 0xc9, 0xa3, 0x7e, 0x6a, 0x3e, 0x22, - 0x2e, 0xbd, 0x11, 0x5c, 0xaa, 0x67, 0x26, 0xf5, 0xb9, 0xd4, 0x31, 0x8f, 0xc2, 0x23, 0xb0, 0x38, - 0xb8, 0x4c, 0x72, 0xf4, 0xed, 0x8c, 0xf2, 0x0c, 0x48, 0xcb, 0xc0, 0x47, 0x9f, 0xf4, 0x0c, 0x68, - 0x23, 0xf0, 0x43, 0xb0, 0x64, 0x58, 0x1d, 0xf1, 0xf4, 0x98, 0x78, 0xda, 0x1c, 0x7d, 0x69, 0x99, - 0xab, 0xf9, 0x8e, 0x61, 0x08, 0x5e, 0x02, 0xa5, 0xa8, 0x1b, 0x46, 0x87, 0xfc, 0xb8, 0x89, 0xf0, - 0x15, 0xfb, 0xd1, 0x98, 0x5f, 0x24, 0x56, 0x76, 0x78, 0x7c, 0x06, 0xcc, 0x53, 0x90, 0xb6, 0xb7, - 0xbd, 0xd7, 0xba, 0x6e, 0x3d, 0x1a, 0xf3, 0x21, 0xa1, 0x51, 0xf7, 0x52, 0x44, 0xc0, 0xaa, 0xfd, - 0x23, 0xde, 0x91, 0x20, 0x56, 0x56, 0xbb, 0x17, 0x01, 0xfd, 0xca, 0xca, 0x36, 0x66, 0xed, 0x0d, - 0x40, 0x8c, 0xb4, 0x0a, 0xeb, 0xd2, 0xc5, 0x85, 0x3c, 0x8f, 0xac, 0xf1, 0x84, 0x7e, 0x63, 0x91, - 0x30, 0x97, 0x57, 0x69, 0x67, 0x6a, 0x95, 0xb7, 0x44, 0x56, 0xf1, 0x13, 0xf7, 0x41, 0xd0, 0xee, - 0x87, 0xd9, 0x8d, 0x06, 0x9b, 0x9e, 0xd3, 0x79, 0xd0, 0x07, 0x8b, 0x6a, 0x3b, 0x43, 0x30, 0xfe, - 0xd6, 0x62, 0xb7, 0x40, 0x9d, 0x91, 0x48, 0x03, 0xa5, 0x9c, 0x57, 0x9a, 0x1e, 0x39, 0x9c, 0x9e, - 0x2b, 0x38, 0x7f, 0x37, 0x84, 0xd3, 0x73, 0x07, 0x39, 0x3d, 0x97, 0x73, 0xee, 0x4b, 0xf7, 0xe1, - 0xbe, 0x1a, 0xe8, 0xef, 0x29, 0xe9, 0xf9, 0x01, 0xd2, 0x7d, 0x29, 0xd2, 0x05, 0xb5, 0x9f, 0x92, - 0x47, 0x2b, 0xc5, 0xfa, 0x87, 0x61, 0xb4, 0x3c, 0xd8, 0x05, 0xb5, 0xfb, 0x62, 0xca, 0x00, 0xd1, - 0x77, 0xc1, 0xfa, 0xc7, 0xbc, 0x0c, 0x10, 0x0d, 0xd7, 0x32, 0x40, 0x6c, 0xa6, 0x50, 0xa9, 0xba, - 0x0b, 0xd2, 0x3f, 0xe5, 0x85, 0x4a, 0x05, 0x5c, 0x0b, 0x95, 0x1a, 0x4d, 0xb4, 0xec, 0x61, 0xe4, - 0xb4, 0x7f, 0xce, 0xa3, 0xa5, 0xf5, 0xaa, 0xd1, 0x52, 0xa3, 0x29, 0x03, 0xa4, 0x9c, 0x05, 0xeb, - 0x5f, 0xf2, 0x32, 0x40, 0x2a, 0x5c, 0xcb, 0x00, 0xb1, 0x71, 0xce, 0xba, 0xf4, 0x77, 0xb4, 0x52, - 0xfc, 0x7f, 0xb5, 0x88, 0x62, 0x0c, 0x2d, 0x7e, 0xf9, 0xfe, 0x24, 0x05, 0xa9, 0xde, 0xae, 0x05, - 0xe3, 0xdf, 0x2c, 0x76, 0x29, 0x19, 0x56, 0xfc, 
0xca, 0x1d, 0x3c, 0x87, 0x53, 0x2a, 0xa8, 0xbf, - 0x0f, 0xe1, 0x14, 0xc5, 0xaf, 0x5c, 0xd8, 0xa5, 0x3d, 0xd2, 0xee, 0xed, 0x82, 0xf4, 0x1f, 0x94, - 0xf4, 0x84, 0xe2, 0x57, 0xaf, 0xf7, 0x79, 0xb4, 0x52, 0xac, 0xff, 0x1c, 0x46, 0x2b, 0x8a, 0x5f, - 0x6d, 0x06, 0x98, 0x32, 0xa0, 0x16, 0xff, 0xbf, 0xf2, 0x32, 0x20, 0x17, 0xbf, 0x72, 0x6f, 0x36, - 0x85, 0xaa, 0x15, 0xff, 0xbf, 0xf3, 0x42, 0x55, 0x8a, 0x5f, 0xbd, 0x65, 0x9b, 0x68, 0xb5, 0xe2, - 0xff, 0x4f, 0x1e, 0xad, 0x52, 0xfc, 0xea, 0xb5, 0xcd, 0x94, 0x01, 0xb5, 0xf8, 0xff, 0x9b, 0x97, - 0x01, 0xb9, 0xf8, 0x95, 0xbb, 0x39, 0xe7, 0x7c, 0x28, 0xb5, 0x40, 0xf9, 0xeb, 0x0e, 0xf4, 0xbd, - 0x02, 0x6b, 0x29, 0x0d, 0xac, 0x9d, 0x21, 0xb2, 0xf6, 0x28, 0xb7, 0xc0, 0xc7, 0x40, 0xf4, 0xd7, - 0x1a, 0xe2, 0xbd, 0x06, 0xfa, 0x7e, 0x21, 0xe7, 0xfc, 0x78, 0xc6, 0x21, 0xbe, 0xf0, 0x2f, 0x4c, - 0xf0, 0xd3, 0x60, 0x4e, 0xea, 0xf7, 0xf2, 0x77, 0x2c, 0xe8, 0x07, 0x79, 0x64, 0x35, 0x8c, 0x79, - 0x12, 0x24, 0xaf, 0x32, 0x32, 0x61, 0x82, 0x5b, 0x6a, 0x0b, 0xb5, 0xdf, 0x4c, 0xd1, 0x0f, 0x29, - 0xd1, 0x92, 0x69, 0x13, 0xfa, 0xcd, 0x54, 0x69, 0xae, 0xf6, 0x9b, 0x29, 0xbc, 0x05, 0x44, 0x1b, - 0xae, 0x11, 0x74, 0x8f, 0xd1, 0x8f, 0xe8, 0xfc, 0xf9, 0x81, 0xf9, 0x5b, 0xdd, 0x63, 0xbf, 0xc8, - 0xa1, 0x5b, 0xdd, 0x63, 0x78, 0x57, 0x6a, 0xcb, 0x7e, 0x8c, 0xb7, 0x01, 0xfd, 0x98, 0xce, 0x5d, - 0x1c, 0x98, 0x4b, 0x77, 0x49, 0x34, 0x02, 0xc9, 0x57, 0xbc, 0x3d, 0x59, 0x81, 0xf2, 0xed, 0xf9, - 0x69, 0x81, 0xec, 0xf6, 0xb0, 0xed, 0x11, 0x75, 0x29, 0x6d, 0x8f, 0x20, 0xca, 0xb6, 0xe7, 0x67, - 0x85, 0x1c, 0x85, 0x93, 0xb6, 0x87, 0x4f, 0xcb, 0xb6, 0x47, 0xe6, 0x22, 0xdb, 0x43, 0x76, 0xe7, - 0xe7, 0x79, 0x5c, 0xd2, 0xee, 0x64, 0xfd, 0x33, 0x36, 0x0b, 0xef, 0x8e, 0xfc, 0xa8, 0xe0, 0xdd, - 0xf9, 0x35, 0x25, 0xca, 0xdf, 0x1d, 0xe9, 0xe9, 0x60, 0xbb, 0x23, 0x28, 0xf0, 0xee, 0xfc, 0x82, - 0xce, 0xcf, 0xd9, 0x1d, 0x0e, 0x65, 0xbb, 0x23, 0x66, 0xd2, 0xdd, 0xf9, 0x25, 0x9d, 0x9b, 0xbb, - 0x3b, 0x1c, 0x4e, 0x77, 0xe7, 0x02, 0x00, 0x64, 0xfd, 0xdd, 0xa0, 0x13, 0xae, 0xa3, 
0x2f, 0xd9, - 0xe4, 0x8d, 0x8d, 0x64, 0x82, 0x0e, 0x28, 0xd2, 0xfa, 0xc5, 0x5f, 0x37, 0xd0, 0x97, 0x65, 0xc4, - 0x2e, 0x36, 0xc1, 0x8b, 0xa0, 0xd4, 0xc8, 0x20, 0x9b, 0xe8, 0x2b, 0x0c, 0x52, 0xe3, 0x90, 0x4d, - 0xb8, 0x02, 0xa6, 0x28, 0x82, 0x40, 0xdc, 0x06, 0xfa, 0xaa, 0x4e, 0xe3, 0xe2, 0xbf, 0xf1, 0xc8, - 0xb7, 0x35, 0x0c, 0xb9, 0x81, 0xbe, 0x46, 0x11, 0xb2, 0x0d, 0x5e, 0xe2, 0x34, 0x6b, 0x84, 0xc7, - 0x43, 0x5f, 0x57, 0x40, 0x98, 0xc7, 0x13, 0x2b, 0xc2, 0xdf, 0x6e, 0xa2, 0x6f, 0xe8, 0x8e, 0x6e, - 0x62, 0x80, 0x08, 0xed, 0x16, 0xfa, 0xa6, 0x1e, 0xed, 0xad, 0x6c, 0xc9, 0xf8, 0xeb, 0x6d, 0xf4, - 0x2d, 0x9d, 0xe2, 0x36, 0x5c, 0x01, 0xa5, 0x9a, 0x40, 0xac, 0xaf, 0xa1, 0x6f, 0xb3, 0x38, 0x04, - 0xc9, 0xfa, 0x1a, 0xc1, 0xec, 0x6c, 0xbf, 0xff, 0xa0, 0xb1, 0xbb, 0xf5, 0x64, 0x7b, 0x7d, 0x1d, - 0x7d, 0x87, 0x63, 0xb0, 0x91, 0xda, 0x32, 0x0c, 0xc9, 0xf5, 0x06, 0xfa, 0xae, 0x82, 0x21, 0xb6, - 0xe5, 0x17, 0x60, 0x4a, 0xfd, 0x8b, 0xb9, 0x04, 0xac, 0x80, 0xbd, 0x5a, 0xb3, 0x02, 0xf8, 0x2e, - 0x28, 0x36, 0x23, 0xd1, 0x1d, 0x47, 0x85, 0x93, 0x3a, 0xe9, 0x32, 0x7a, 0xf9, 0x1e, 0x80, 0x83, - 0xdd, 0x2e, 0x58, 0x06, 0xf6, 0xab, 0xf0, 0x98, 0xb9, 0xc0, 0xbf, 0xc2, 0x79, 0x70, 0x9a, 0x16, - 0x57, 0x81, 0xd8, 0xe8, 0x97, 0x3b, 0x85, 0x5b, 0x56, 0xc6, 0x20, 0x77, 0xb6, 0x64, 0x06, 0xdb, - 0xc0, 0x60, 0xcb, 0x0c, 0x55, 0x30, 0x6f, 0xea, 0x61, 0xc9, 0x1c, 0x53, 0x06, 0x8e, 0x29, 0x33, - 0x87, 0xd2, 0xab, 0x92, 0x39, 0x4e, 0x19, 0x38, 0x4e, 0x0d, 0x72, 0x0c, 0xf4, 0xa4, 0x64, 0x8e, - 0x59, 0x03, 0xc7, 0xac, 0x99, 0x43, 0xe9, 0x3d, 0xc9, 0x1c, 0xd0, 0xc0, 0x01, 0x65, 0x8e, 0x07, - 0x60, 0xd1, 0xdc, 0x61, 0x92, 0x59, 0x26, 0x0c, 0x2c, 0x13, 0x39, 0x2c, 0x6a, 0x17, 0x49, 0x66, - 0x19, 0x37, 0xb0, 0x8c, 0xcb, 0x2c, 0x35, 0x80, 0xf2, 0xfa, 0x44, 0x32, 0xcf, 0x8c, 0x81, 0x67, - 0x26, 0x8f, 0x47, 0xeb, 0x03, 0xc9, 0x3c, 0x65, 0x03, 0x4f, 0xd9, 0x58, 0x6d, 0x72, 0xb7, 0xe7, - 0xa4, 0x7a, 0x2d, 0xc8, 0x0c, 0x5b, 0x60, 0xce, 0xd0, 0xd8, 0x39, 0x89, 0xc2, 0x92, 0x29, 0xee, - 0x82, 0xb2, 0xde, 0xc5, 
0x91, 0xe7, 0x4f, 0x1a, 0xe6, 0x4f, 0x1a, 0x8a, 0x44, 0xef, 0xd8, 0xc8, - 0x1c, 0x67, 0x0c, 0x1c, 0x67, 0x06, 0x97, 0xa1, 0xb7, 0x66, 0x4e, 0xa2, 0x28, 0xc9, 0x14, 0x31, - 0x38, 0x37, 0xa4, 0xf7, 0x62, 0xa0, 0x7a, 0x4f, 0xa6, 0x7a, 0x8d, 0x17, 0x1f, 0x92, 0xcf, 0x23, - 0x70, 0x7e, 0x58, 0xf3, 0xc5, 0xe0, 0x74, 0x5d, 0x75, 0x3a, 0xf4, 0x5d, 0x88, 0xe4, 0xa8, 0x4d, - 0x0b, 0xce, 0xd4, 0x74, 0x31, 0x38, 0xb9, 0x23, 0x3b, 0x19, 0xf5, 0xed, 0x88, 0xe4, 0x2d, 0x00, - 0x67, 0x73, 0x1b, 0x2f, 0x06, 0x77, 0xab, 0xaa, 0xbb, 0xfc, 0x77, 0x26, 0x99, 0x8b, 0x95, 0xdb, - 0x00, 0x48, 0x2d, 0xa2, 0x09, 0x60, 0xd7, 0xea, 0xf5, 0xf2, 0x18, 0xfe, 0xa5, 0xba, 0xe5, 0x97, - 0x2d, 0xfa, 0xcb, 0x8b, 0x72, 0x01, 0xbb, 0xdb, 0xdd, 0x7e, 0x58, 0xfe, 0x1f, 0xff, 0xcf, 0xaa, - 0x4e, 0xf1, 0xe6, 0x09, 0x39, 0xc0, 0x56, 0xde, 0x00, 0xd3, 0x5a, 0x67, 0xab, 0x04, 0xac, 0x26, - 0x3f, 0x50, 0x9a, 0xd7, 0x6e, 0x00, 0x90, 0xfd, 0x63, 0x18, 0x38, 0x03, 0x8a, 0xfb, 0xbb, 0x7b, - 0x4f, 0xb7, 0xef, 0xef, 0xd4, 0x76, 0xb6, 0x1f, 0x94, 0xc7, 0x60, 0x09, 0x4c, 0x3e, 0xf5, 0xeb, - 0xcf, 0xea, 0xd5, 0xfd, 0x5a, 0xd9, 0x82, 0x93, 0xe0, 0xd4, 0xe3, 0xbd, 0xfa, 0x6e, 0xb9, 0x70, - 0xed, 0x1e, 0x28, 0xca, 0x8d, 0xa5, 0x19, 0x50, 0xac, 0xd5, 0xfd, 0xed, 0x9d, 0x87, 0xbb, 0x0d, - 0x1a, 0xa9, 0x64, 0xa0, 0x11, 0x2b, 0x86, 0x17, 0xe5, 0x42, 0xf5, 0x22, 0xb8, 0xd0, 0x8c, 0x3a, - 0x03, 0x7f, 0xb6, 0x48, 0xc9, 0x79, 0x39, 0x4e, 0xac, 0x9b, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, - 0x29, 0x30, 0x51, 0x54, 0x22, 0x25, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto b/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto deleted file mode 100644 index fc96074a..00000000 --- a/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto +++ /dev/null @@ -1,273 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package conformance; -option java_package = "com.google.protobuf.conformance"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -// This defines the conformance testing protocol. 
This protocol exists between -// the conformance test suite itself and the code being tested. For each test, -// the suite will send a ConformanceRequest message and expect a -// ConformanceResponse message. -// -// You can either run the tests in two different ways: -// -// 1. in-process (using the interface in conformance_test.h). -// -// 2. as a sub-process communicating over a pipe. Information about how to -// do this is in conformance_test_runner.cc. -// -// Pros/cons of the two approaches: -// -// - running as a sub-process is much simpler for languages other than C/C++. -// -// - running as a sub-process may be more tricky in unusual environments like -// iOS apps, where fork/stdin/stdout are not available. - -enum WireFormat { - UNSPECIFIED = 0; - PROTOBUF = 1; - JSON = 2; -} - -// Represents a single test case's input. The testee should: -// -// 1. parse this proto (which should always succeed) -// 2. parse the protobuf or JSON payload in "payload" (which may fail) -// 3. if the parse succeeded, serialize the message in the requested format. -message ConformanceRequest { - // The payload (whether protobuf of JSON) is always for a TestAllTypes proto - // (see below). - oneof payload { - bytes protobuf_payload = 1; - string json_payload = 2; - } - - // Which format should the testee serialize its message to? - WireFormat requested_output_format = 3; -} - -// Represents a single test case's output. -message ConformanceResponse { - oneof result { - // This string should be set to indicate parsing failed. The string can - // provide more information about the parse error if it is available. - // - // Setting this string does not necessarily mean the testee failed the - // test. Some of the test cases are intentionally invalid input. - string parse_error = 1; - - // If the input was successfully parsed but errors occurred when - // serializing it to the requested output format, set the error message in - // this field. 
- string serialize_error = 6; - - // This should be set if some other error occurred. This will always - // indicate that the test failed. The string can provide more information - // about the failure. - string runtime_error = 2; - - // If the input was successfully parsed and the requested output was - // protobuf, serialize it to protobuf and set it in this field. - bytes protobuf_payload = 3; - - // If the input was successfully parsed and the requested output was JSON, - // serialize to JSON and set it in this field. - string json_payload = 4; - - // For when the testee skipped the test, likely because a certain feature - // wasn't supported, like JSON input/output. - string skipped = 5; - } -} - -// This proto includes every type of field in both singular and repeated -// forms. -message TestAllTypes { - message NestedMessage { - int32 a = 1; - TestAllTypes corecursive = 2; - } - - enum NestedEnum { - FOO = 0; - BAR = 1; - BAZ = 2; - NEG = -1; // Intentionally negative. - } - - // Singular - int32 optional_int32 = 1; - int64 optional_int64 = 2; - uint32 optional_uint32 = 3; - uint64 optional_uint64 = 4; - sint32 optional_sint32 = 5; - sint64 optional_sint64 = 6; - fixed32 optional_fixed32 = 7; - fixed64 optional_fixed64 = 8; - sfixed32 optional_sfixed32 = 9; - sfixed64 optional_sfixed64 = 10; - float optional_float = 11; - double optional_double = 12; - bool optional_bool = 13; - string optional_string = 14; - bytes optional_bytes = 15; - - NestedMessage optional_nested_message = 18; - ForeignMessage optional_foreign_message = 19; - - NestedEnum optional_nested_enum = 21; - ForeignEnum optional_foreign_enum = 22; - - string optional_string_piece = 24 [ctype=STRING_PIECE]; - string optional_cord = 25 [ctype=CORD]; - - TestAllTypes recursive_message = 27; - - // Repeated - repeated int32 repeated_int32 = 31; - repeated int64 repeated_int64 = 32; - repeated uint32 repeated_uint32 = 33; - repeated uint64 repeated_uint64 = 34; - repeated sint32 repeated_sint32 = 
35; - repeated sint64 repeated_sint64 = 36; - repeated fixed32 repeated_fixed32 = 37; - repeated fixed64 repeated_fixed64 = 38; - repeated sfixed32 repeated_sfixed32 = 39; - repeated sfixed64 repeated_sfixed64 = 40; - repeated float repeated_float = 41; - repeated double repeated_double = 42; - repeated bool repeated_bool = 43; - repeated string repeated_string = 44; - repeated bytes repeated_bytes = 45; - - repeated NestedMessage repeated_nested_message = 48; - repeated ForeignMessage repeated_foreign_message = 49; - - repeated NestedEnum repeated_nested_enum = 51; - repeated ForeignEnum repeated_foreign_enum = 52; - - repeated string repeated_string_piece = 54 [ctype=STRING_PIECE]; - repeated string repeated_cord = 55 [ctype=CORD]; - - // Map - map < int32, int32> map_int32_int32 = 56; - map < int64, int64> map_int64_int64 = 57; - map < uint32, uint32> map_uint32_uint32 = 58; - map < uint64, uint64> map_uint64_uint64 = 59; - map < sint32, sint32> map_sint32_sint32 = 60; - map < sint64, sint64> map_sint64_sint64 = 61; - map < fixed32, fixed32> map_fixed32_fixed32 = 62; - map < fixed64, fixed64> map_fixed64_fixed64 = 63; - map map_sfixed32_sfixed32 = 64; - map map_sfixed64_sfixed64 = 65; - map < int32, float> map_int32_float = 66; - map < int32, double> map_int32_double = 67; - map < bool, bool> map_bool_bool = 68; - map < string, string> map_string_string = 69; - map < string, bytes> map_string_bytes = 70; - map < string, NestedMessage> map_string_nested_message = 71; - map < string, ForeignMessage> map_string_foreign_message = 72; - map < string, NestedEnum> map_string_nested_enum = 73; - map < string, ForeignEnum> map_string_foreign_enum = 74; - - oneof oneof_field { - uint32 oneof_uint32 = 111; - NestedMessage oneof_nested_message = 112; - string oneof_string = 113; - bytes oneof_bytes = 114; - } - - // Well-known types - google.protobuf.BoolValue optional_bool_wrapper = 201; - google.protobuf.Int32Value optional_int32_wrapper = 202; - 
google.protobuf.Int64Value optional_int64_wrapper = 203; - google.protobuf.UInt32Value optional_uint32_wrapper = 204; - google.protobuf.UInt64Value optional_uint64_wrapper = 205; - google.protobuf.FloatValue optional_float_wrapper = 206; - google.protobuf.DoubleValue optional_double_wrapper = 207; - google.protobuf.StringValue optional_string_wrapper = 208; - google.protobuf.BytesValue optional_bytes_wrapper = 209; - - repeated google.protobuf.BoolValue repeated_bool_wrapper = 211; - repeated google.protobuf.Int32Value repeated_int32_wrapper = 212; - repeated google.protobuf.Int64Value repeated_int64_wrapper = 213; - repeated google.protobuf.UInt32Value repeated_uint32_wrapper = 214; - repeated google.protobuf.UInt64Value repeated_uint64_wrapper = 215; - repeated google.protobuf.FloatValue repeated_float_wrapper = 216; - repeated google.protobuf.DoubleValue repeated_double_wrapper = 217; - repeated google.protobuf.StringValue repeated_string_wrapper = 218; - repeated google.protobuf.BytesValue repeated_bytes_wrapper = 219; - - google.protobuf.Duration optional_duration = 301; - google.protobuf.Timestamp optional_timestamp = 302; - google.protobuf.FieldMask optional_field_mask = 303; - google.protobuf.Struct optional_struct = 304; - google.protobuf.Any optional_any = 305; - google.protobuf.Value optional_value = 306; - - repeated google.protobuf.Duration repeated_duration = 311; - repeated google.protobuf.Timestamp repeated_timestamp = 312; - repeated google.protobuf.FieldMask repeated_fieldmask = 313; - repeated google.protobuf.Struct repeated_struct = 324; - repeated google.protobuf.Any repeated_any = 315; - repeated google.protobuf.Value repeated_value = 316; - - // Test field-name-to-JSON-name convention. 
- int32 fieldname1 = 401; - int32 field_name2 = 402; - int32 _field_name3 = 403; - int32 field__name4_ = 404; - int32 field0name5 = 405; - int32 field_0_name6 = 406; - int32 fieldName7 = 407; - int32 FieldName8 = 408; - int32 field_Name9 = 409; - int32 Field_Name10 = 410; - int32 FIELD_NAME11 = 411; - int32 FIELD_name12 = 412; -} - -message ForeignMessage { - int32 c = 1; -} - -enum ForeignEnum { - FOREIGN_FOO = 0; - FOREIGN_BAR = 1; - FOREIGN_BAZ = 2; -} diff --git a/vendor/github.com/golang/protobuf/conformance/test.sh b/vendor/github.com/golang/protobuf/conformance/test.sh deleted file mode 100755 index e6de29b9..00000000 --- a/vendor/github.com/golang/protobuf/conformance/test.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -PROTOBUF_ROOT=$1 -CONFORMANCE_ROOT=$1/conformance -CONFORMANCE_TEST_RUNNER=$CONFORMANCE_ROOT/conformance-test-runner - -cd $(dirname $0) - -if [[ $PROTOBUF_ROOT == "" ]]; then - echo "usage: test.sh " >/dev/stderr - exit 1 -fi - -if [[ ! -x $CONFORMANCE_TEST_RUNNER ]]; then - echo "SKIP: conformance test runner not installed" >/dev/stderr - exit 0 -fi - -a=$CONFORMANCE_ROOT/conformance.proto -b=internal/conformance_proto/conformance.proto -if [[ $(diff $a $b) != "" ]]; then - cp $a $b - echo "WARNING: conformance.proto is out of date" >/dev/stderr -fi - -$CONFORMANCE_TEST_RUNNER --failure_list failure_list_go.txt ./conformance.sh diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go deleted file mode 100644 index ac7e51bf..00000000 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go +++ /dev/null @@ -1,93 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package descriptor provides functions for obtaining protocol buffer -// descriptors for generated Go types. -// -// These functions cannot go in package proto because they depend on the -// generated protobuf descriptor messages, which themselves depend on proto. 
-package descriptor - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - - "github.com/golang/protobuf/proto" - protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -// extractFile extracts a FileDescriptorProto from a gzip'd buffer. -func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) { - r, err := gzip.NewReader(bytes.NewReader(gz)) - if err != nil { - return nil, fmt.Errorf("failed to open gzip reader: %v", err) - } - defer r.Close() - - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) - } - - fd := new(protobuf.FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) - } - - return fd, nil -} - -// Message is a proto.Message with a method to return its descriptor. -// -// Message types generated by the protocol compiler always satisfy -// the Message interface. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it -// describing the given message. 
-func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) { - gz, path := msg.Descriptor() - fd, err := extractFile(gz) - if err != nil { - panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) - } - - md = fd.MessageType[path[0]] - for _, i := range path[1:] { - md = md.NestedType[i] - } - return fd, md -} diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go deleted file mode 100644 index bf5174d3..00000000 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package descriptor_test - -import ( - "fmt" - "testing" - - "github.com/golang/protobuf/descriptor" - tpb "github.com/golang/protobuf/proto/test_proto" - protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -func TestMessage(t *testing.T) { - var msg *protobuf.DescriptorProto - fd, md := descriptor.ForMessage(msg) - if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want { - t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want) - } - if name, want := md.GetName(), "DescriptorProto"; name != want { - t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want) - } -} - -func Example_options() { - var msg *tpb.MyMessageSet - _, md := descriptor.ForMessage(msg) - if md.GetOptions().GetMessageSetWireFormat() { - fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName()) - } - - // Output: - // MyMessageSet uses option message_set_wire_format. -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go deleted file mode 100644 index ada2b78e..00000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ /dev/null @@ -1,1271 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. -It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. - -This package produces a different output than the standard "encoding/json" package, -which does not operate correctly on protocol buffers. 
-*/ -package jsonpb - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - stpb "github.com/golang/protobuf/ptypes/struct" -) - -const secondInNanos = int64(time.Second / time.Nanosecond) - -// Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them. -type Marshaler struct { - // Whether to render enum values as integers, as opposed to string values. - EnumsAsInts bool - - // Whether to render fields with zero values. - EmitDefaults bool - - // A string to indent each level by. The presence of this field will - // also cause a space to appear between the field separator and - // value, and for newlines to be appear between fields and array - // elements. - Indent string - - // Whether to use the original (.proto) name for fields. - OrigName bool - - // A custom URL resolver to use when marshaling Any messages to JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// AnyResolver takes a type URL, present in an Any message, and resolves it into -// an instance of the associated message. -type AnyResolver interface { - Resolve(typeUrl string) (proto.Message, error) -} - -func defaultResolveAny(typeUrl string) (proto.Message, error) { - // Only the part of typeUrl after the last slash is relevant. - mname := typeUrl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] - } - mt := proto.MessageType(mname) - if mt == nil { - return nil, fmt.Errorf("unknown message type %q", mname) - } - return reflect.New(mt.Elem()).Interface().(proto.Message), nil -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. 
Messages that implement this should -// also implement JSONPBUnmarshaler so that the custom format can be -// parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize -// the way they are unmarshaled from JSON. Messages that implement this -// should also implement JSONPBMarshaler so that the custom format can be -// produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Marshal marshals a protocol buffer into JSON. -func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { - v := reflect.ValueOf(pb) - if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return errors.New("Marshal called with nil") - } - // Check for unset required fields first. - if err := checkRequiredFields(pb); err != nil { - return err - } - writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "", "") -} - -// MarshalToString converts a protocol buffer object to JSON string. -func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { - var buf bytes.Buffer - if err := m.Marshal(&buf, pb); err != nil { - return "", err - } - return buf.String(), nil -} - -type int32Slice []int32 - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(1), - `"-Infinity"`: math.Inf(-1), -} - -// For sorting extensions ids to ensure stable output. 
-func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type wkt interface { - XXX_WellKnownType() string -} - -// marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { - if jsm, ok := v.(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(m) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", v, err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - - out.write(string(b)) - return out.err - } - - s := reflect.ValueOf(v).Elem() - - // Handle well-known types. - if wkt, ok := v.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, ..." - sprop := proto.GetProperties(s.Type()) - return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) - case "Any": - // Any is a bit more involved. - return m.marshalAny(out, v, indent) - case "Duration": - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." 
- s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - if s < 0 { - ns = -ns - } - x := fmt.Sprintf("%d.%09d", s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`s"`) - return out.err - case "Struct", "ListValue": - // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`Z"`) - return out.err - case "Value": - // Value has a single oneof. - kind := s.Field(0) - if kind.IsNil() { - // "absence of any variant indicates an error" - return errors.New("nil Value") - } - // oneof -> *T -> T -> T.F - x := kind.Elem().Elem().Field(0) - // TODO: pass the correct Properties if needed. 
- return m.marshalValue(out, &proto.Properties{}, x, indent) - } - } - - out.write("{") - if m.Indent != "" { - out.write("\n") - } - - firstField := true - - if typeURL != "" { - if err := m.marshalTypeURL(out, indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < s.NumField(); i++ { - value := s.Field(i) - valueField := s.Type().Field(i) - if strings.HasPrefix(valueField.Name, "XXX_") { - continue - } - - // IsNil will panic on most value kinds. - switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface: - if value.IsNil() { - continue - } - } - - if !m.EmitDefaults { - switch value.Kind() { - case reflect.Bool: - if !value.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if value.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if value.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if value.Float() == 0 { - continue - } - case reflect.String: - if value.Len() == 0 { - continue - } - case reflect.Map, reflect.Ptr, reflect.Slice: - if value.IsNil() { - continue - } - } - } - - // Oneof fields need special handling. - if valueField.Tag.Get("protobuf_oneof") != "" { - // value is an interface containing &T{real_value}. - sv := value.Elem().Elem() // interface -> *T -> T - value = sv.Field(0) - valueField = sv.Type().Field(0) - } - prop := jsonProperties(valueField, m.OrigName) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, prop, value, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if ep, ok := v.(proto.Message); ok { - extensions := proto.RegisteredExtensions(v) - // Sort extensions for stable output. 
- ids := make([]int32, 0, len(extensions)) - for id, desc := range extensions { - if !proto.HasExtension(ep, desc) { - continue - } - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - for _, id := range ids { - desc := extensions[id] - if desc == nil { - // unknown extension - continue - } - ext, extErr := proto.GetExtension(ep, desc) - if extErr != nil { - return extErr - } - value := reflect.ValueOf(ext) - var prop proto.Properties - prop.Parse(desc.Tag) - prop.JSONName = fmt.Sprintf("[%s]", desc.Name) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, &prop, value, indent); err != nil { - return err - } - firstField = false - } - - } - - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err -} - -func (m *Marshaler) writeSep(out *errWriter) { - if m.Indent != "" { - out.write(",\n") - } else { - out.write(",") - } -} - -func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." 
- v := reflect.ValueOf(any).Elem() - turl := v.Field(0).String() - val := v.Field(1).Bytes() - - var msg proto.Message - var err error - if m.AnyResolver != nil { - msg, err = m.AnyResolver.Resolve(turl) - } else { - msg, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if err := proto.Unmarshal(val, msg); err != nil { - return err - } - - if _, ok := msg.(wkt); ok { - out.write("{") - if m.Indent != "" { - out.write("\n") - } - if err := m.marshalTypeURL(out, indent, turl); err != nil { - return err - } - m.writeSep(out) - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - out.write(`"value": `) - } else { - out.write(`"value":`) - } - if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { - return err - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err - } - - return m.marshalObject(out, msg, indent, turl) -} - -func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"@type":`) - if m.Indent != "" { - out.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - out.write(string(b)) - return out.err -} - -// marshalField writes field description and value to the Writer. -func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"`) - out.write(prop.JSONName) - out.write(`":`) - if m.Indent != "" { - out.write(" ") - } - if err := m.marshalValue(out, prop, v, indent); err != nil { - return err - } - return nil -} - -// marshalValue writes the value to the Writer. 
-func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error - v = reflect.Indirect(v) - - // Handle nil pointer - if v.Kind() == reflect.Invalid { - out.write("null") - return out.err - } - - // Handle repeated elements. - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - out.write("[") - comma := "" - for i := 0; i < v.Len(); i++ { - sliceVal := v.Index(i) - out.write(comma) - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { - return err - } - comma = "," - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write("]") - return out.err - } - - // Handle well-known types. - // Most are handled up in marshalObject (because 99% are messages). - if wkt, ok := v.Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "NullValue": - out.write("null") - return out.err - } - } - - // Handle enumerations. - if !m.EnumsAsInts && prop.Enum != "" { - // Unknown enum values will are stringified by the proto library as their - // value. Such values should _not_ be quoted or they will be interpreted - // as an enum string instead of their value. - enumStr := v.Interface().(fmt.Stringer).String() - var valStr string - if v.Kind() == reflect.Ptr { - valStr = strconv.Itoa(int(v.Elem().Int())) - } else { - valStr = strconv.Itoa(int(v.Int())) - } - isKnownEnum := enumStr != valStr - if isKnownEnum { - out.write(`"`) - } - out.write(enumStr) - if isKnownEnum { - out.write(`"`) - } - return out.err - } - - // Handle nested messages. - if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") - } - - // Handle maps. - // Since Go randomizes map iteration, we sort keys for stable output. 
- if v.Kind() == reflect.Map { - out.write(`{`) - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for i, k := range keys { - if i > 0 { - out.write(`,`) - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - - // TODO handle map key prop properly - b, err := json.Marshal(k.Interface()) - if err != nil { - return err - } - s := string(b) - - // If the JSON is not a string value, encode it again to make it one. - if !strings.HasPrefix(s, `"`) { - b, err := json.Marshal(s) - if err != nil { - return err - } - s = string(b) - } - - out.write(s) - out.write(`:`) - if m.Indent != "" { - out.write(` `) - } - - vprop := prop - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { - return err - } - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write(`}`) - return out.err - } - - // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - f := v.Float() - var sval string - switch { - case math.IsInf(f, 1): - sval = `"Infinity"` - case math.IsInf(f, -1): - sval = `"-Infinity"` - case math.IsNaN(f): - sval = `"NaN"` - } - if sval != "" { - out.write(sval) - return out.err - } - } - - // Default handling defers to the encoding/json library. - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) - if needToQuote { - out.write(`"`) - } - out.write(string(b)) - if needToQuote { - out.write(`"`) - } - return out.err -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. 
- AllowUnknownFields bool - - // A custom URL resolver to use when unmarshaling Any messages from JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { - return err - } - return checkRequiredFields(pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) -} - -// UnmarshalString will populate the fields of a protocol buffer based -// on a JSON string. This function is lenient and will decode any options -// permutations of the related Marshaler. 
-func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) -} - -// unmarshalValue converts/copies a value into the target. -// prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { - targetType := target.Type() - - // Allocate memory for pointer fields. - if targetType.Kind() == reflect.Ptr { - // If input value is "null" and target is a pointer type, then the field should be treated as not set - // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. - _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) - if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { - return nil - } - target.Set(reflect.New(targetType.Elem())) - - return u.unmarshalValue(target.Elem(), inputValue, prop) - } - - if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, []byte(inputValue)) - } - - // Handle well-known types that are not pointers. - if w, ok := target.Addr().Interface().(wkt); ok { - switch w.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - return u.unmarshalValue(target.Field(0), inputValue, prop) - case "Any": - // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see - // https://github.com/golang/go/issues/14493 - var jsonFields map[string]*json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - val, ok := jsonFields["@type"] - if !ok || val == nil { - return errors.New("Any JSON doesn't have '@type'") - } - - var turl string - if err := json.Unmarshal([]byte(*val), &turl); err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) - } - target.Field(0).SetString(turl) - - var m proto.Message - var err error - if u.AnyResolver != nil { - m, err = u.AnyResolver.Resolve(turl) - } else { - m, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if _, ok := m.(wkt); ok { - val, ok := jsonFields["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - - if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } else { - delete(jsonFields, "@type") - nestedProto, err := json.Marshal(jsonFields) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - - if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } - - b, err := proto.Marshal(m) - if err != nil { - return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) - } - target.Field(1).SetBytes(b) - - return nil - case "Duration": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - d, err := time.ParseDuration(unq) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - target.Field(0).SetInt(s) - target.Field(1).SetInt(ns) - return nil - case "Timestamp": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - 
- t, err := time.Parse(time.RFC3339Nano, unq) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - target.Field(0).SetInt(t.Unix()) - target.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Struct": - var m map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &m); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) - for k, jv := range m { - pv := &stpb.Value{} - if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) - } - target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) - } - return nil - case "ListValue": - var s []json.RawMessage - if err := json.Unmarshal(inputValue, &s); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) - for i, sv := range s { - if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { - return err - } - } - return nil - case "Value": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) - } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := unquote(ivStr); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) - } else if v, err := strconv.ParseBool(ivStr); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) - } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { - lv := &stpb.ListValue{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) - return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) - } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { - sv := &stpb.Struct{} - 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) - return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) - } else { - return fmt.Errorf("unrecognized type for Value %q", ivStr) - } - return nil - } - } - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. - // The case of an enum appearing as a number is handled - // at the bottom of this function. - if inputValue[0] == '"' && prop != nil && prop.Enum != "" { - vmap := proto.EnumValueMap(prop.Enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := inputValue[1 : len(inputValue)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) - } - if target.Kind() == reflect.Ptr { // proto2 - target.Set(reflect.New(targetType.Elem())) - target = target.Elem() - } - if targetType.Kind() != reflect.Int32 { - return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) - } - target.SetInt(int64(n)) - return nil - } - - // Handle nested messages. - if targetType.Kind() == reflect.Struct { - var jsonFields map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { - // Be liberal in what names we accept; both orig_name and camelName are okay. - fieldNames := acceptedJSONFieldNames(prop) - - vOrig, okOrig := jsonFields[fieldNames.orig] - vCamel, okCamel := jsonFields[fieldNames.camel] - if !okOrig && !okCamel { - return nil, false - } - // If, for some reason, both are present in the data, favour the camelName. 
- var raw json.RawMessage - if okOrig { - raw = vOrig - delete(jsonFields, fieldNames.orig) - } - if okCamel { - raw = vCamel - delete(jsonFields, fieldNames.camel) - } - return raw, true - } - - sprops := proto.GetProperties(targetType) - for i := 0; i < target.NumField(); i++ { - ft := target.Type().Field(i) - if strings.HasPrefix(ft.Name, "XXX_") { - continue - } - - valueForField, ok := consumeField(sprops.Prop[i]) - if !ok { - continue - } - - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { - return err - } - } - // Check for any oneof fields. - if len(jsonFields) > 0 { - for _, oop := range sprops.OneofTypes { - raw, ok := consumeField(oop.Prop) - if !ok { - continue - } - nv := reflect.New(oop.Type.Elem()) - target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { - return err - } - } - } - // Handle proto2 extensions. - if len(jsonFields) > 0 { - if ep, ok := target.Addr().Interface().(proto.Message); ok { - for _, ext := range proto.RegisteredExtensions(ep) { - name := fmt.Sprintf("[%s]", ext.Name) - raw, ok := jsonFields[name] - if !ok { - continue - } - delete(jsonFields, name) - nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) - if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { - return err - } - if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { - return err - } - } - } - } - if !u.AllowUnknownFields && len(jsonFields) > 0 { - // Pick any field to be the scapegoat. 
- var f string - for fname := range jsonFields { - f = fname - break - } - return fmt.Errorf("unknown field %q in %v", f, targetType) - } - return nil - } - - // Handle arrays (which aren't encoded bytes) - if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { - var slc []json.RawMessage - if err := json.Unmarshal(inputValue, &slc); err != nil { - return err - } - if slc != nil { - l := len(slc) - target.Set(reflect.MakeSlice(targetType, l, l)) - for i := 0; i < l; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err - } - } - } - return nil - } - - // Handle maps (whose keys are always strings) - if targetType.Kind() == reflect.Map { - var mp map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &mp); err != nil { - return err - } - if mp != nil { - target.Set(reflect.MakeMap(targetType)) - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - var kprop *proto.Properties - if prop != nil && prop.MapKeyProp != nil { - kprop = prop.MapKeyProp - } - if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { - return err - } - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - var vprop *proto.Properties - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := u.unmarshalValue(v, raw, vprop); err != nil { - return err - } - target.SetMapIndex(k, v) - } - } - return nil - } - - // Non-finite numbers can be encoded as strings. 
- isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isFloat { - if num, ok := nonFinite[string(inputValue)]; ok { - target.SetFloat(num) - return nil - } - } - - // integers & floats can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || - targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || - targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - - // Use the encoding/json for parsing other value types. - return json.Unmarshal(inputValue, target.Addr().Interface()) -} - -func unquote(s string) (string, error) { - var ret string - err := json.Unmarshal([]byte(s), &ret) - return ret, err -} - -// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. -func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { - var prop proto.Properties - prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - if origName || prop.JSONName == "" { - prop.JSONName = prop.OrigName - } - return &prop -} - -type fieldNames struct { - orig, camel string -} - -func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { - opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} - if prop.JSONName != "" { - opts.camel = prop.JSONName - } - return opts -} - -// Writer wrapper inspired by https://blog.golang.org/errors-are-values -type errWriter struct { - writer io.Writer - err error -} - -func (w *errWriter) write(str string) { - if w.err != nil { - return - } - _, w.err = w.writer.Write([]byte(str)) -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. 
-// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. -type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.String: - return s[i].String() < s[j].String() - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} - -// checkRequiredFields returns an error if any required field in the given proto message is not set. -// This function is used by both Marshal and Unmarshal. While required fields only exist in a -// proto2 message, a proto3 message can contain proto2 message(s). -func checkRequiredFields(pb proto.Message) error { - // Most well-known type messages do not contain required fields. The "Any" type may contain - // a message that has required fields. - // - // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value - // field in order to transform that into JSON, and that should have returned an error if a - // required field is not set in the embedded message. - // - // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the - // embedded message to store the serialized message in Any.Value field, and that should have - // returned an error if a required field is not set. - if _, ok := pb.(wkt); ok { - return nil - } - - v := reflect.ValueOf(pb) - // Skip message if it is not a struct pointer. 
- if v.Kind() != reflect.Ptr { - return nil - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return nil - } - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - sfield := v.Type().Field(i) - - if sfield.PkgPath != "" { - // blank PkgPath means the field is exported; skip if not exported - continue - } - - if strings.HasPrefix(sfield.Name, "XXX_") { - continue - } - - // Oneof field is an interface implemented by wrapper structs containing the actual oneof - // field, i.e. an interface containing &T{real_value}. - if sfield.Tag.Get("protobuf_oneof") != "" { - if field.Kind() != reflect.Interface { - continue - } - v := field.Elem() - if v.Kind() != reflect.Ptr || v.IsNil() { - continue - } - v = v.Elem() - if v.Kind() != reflect.Struct || v.NumField() < 1 { - continue - } - field = v.Field(0) - sfield = v.Type().Field(0) - } - - protoTag := sfield.Tag.Get("protobuf") - if protoTag == "" { - continue - } - var prop proto.Properties - prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) - - switch field.Kind() { - case reflect.Map: - if field.IsNil() { - continue - } - // Check each map value. - keys := field.MapKeys() - for _, k := range keys { - v := field.MapIndex(k) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Slice: - // Handle non-repeated type, e.g. bytes. - if !prop.Repeated { - if prop.Required && field.IsNil() { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - - // Handle repeated type. - if field.IsNil() { - continue - } - // Check each slice item. - for i := 0; i < field.Len(); i++ { - v := field.Index(i) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Ptr: - if field.IsNil() { - if prop.Required { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - if err := checkRequiredFieldsInValue(field); err != nil { - return err - } - } - } - - // Handle proto2 extensions. 
- for _, ext := range proto.RegisteredExtensions(pb) { - if !proto.HasExtension(pb, ext) { - continue - } - ep, err := proto.GetExtension(pb, ext) - if err != nil { - return err - } - err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) - if err != nil { - return err - } - } - - return nil -} - -func checkRequiredFieldsInValue(v reflect.Value) error { - if pm, ok := v.Interface().(proto.Message); ok { - return checkRequiredFields(pm) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go deleted file mode 100644 index 45a13d45..00000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go +++ /dev/null @@ -1,1231 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package jsonpb - -import ( - "bytes" - "encoding/json" - "io" - "math" - "reflect" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - "github.com/golang/protobuf/ptypes" - anypb "github.com/golang/protobuf/ptypes/any" - durpb "github.com/golang/protobuf/ptypes/duration" - stpb "github.com/golang/protobuf/ptypes/struct" - tspb "github.com/golang/protobuf/ptypes/timestamp" - wpb "github.com/golang/protobuf/ptypes/wrappers" -) - -var ( - marshaler = Marshaler{} - - marshalerAllOptions = Marshaler{ - Indent: " ", - } - - simpleObject = &pb.Simple{ - OInt32: proto.Int32(-32), - OInt32Str: proto.Int32(-32), - OInt64: proto.Int64(-6400000000), - OInt64Str: proto.Int64(-6400000000), - OUint32: proto.Uint32(32), - OUint32Str: proto.Uint32(32), - OUint64: proto.Uint64(6400000000), - OUint64Str: proto.Uint64(6400000000), - OSint32: proto.Int32(-13), - OSint32Str: proto.Int32(-13), - OSint64: proto.Int64(-2600000000), - OSint64Str: proto.Int64(-2600000000), - OFloat: proto.Float32(3.14), - OFloatStr: proto.Float32(3.14), - ODouble: proto.Float64(6.02214179e23), - ODoubleStr: proto.Float64(6.02214179e23), - OBool: proto.Bool(true), - OString: proto.String("hello \"there\""), - OBytes: []byte("beep boop"), - } - - simpleObjectInputJSON = `{` + - `"oBool":true,` + - `"oInt32":-32,` + - `"oInt32Str":"-32",` + - 
`"oInt64":-6400000000,` + - `"oInt64Str":"-6400000000",` + - `"oUint32":32,` + - `"oUint32Str":"32",` + - `"oUint64":6400000000,` + - `"oUint64Str":"6400000000",` + - `"oSint32":-13,` + - `"oSint32Str":"-13",` + - `"oSint64":-2600000000,` + - `"oSint64Str":"-2600000000",` + - `"oFloat":3.14,` + - `"oFloatStr":"3.14",` + - `"oDouble":6.02214179e+23,` + - `"oDoubleStr":"6.02214179e+23",` + - `"oString":"hello \"there\"",` + - `"oBytes":"YmVlcCBib29w"` + - `}` - - simpleObjectOutputJSON = `{` + - `"oBool":true,` + - `"oInt32":-32,` + - `"oInt32Str":-32,` + - `"oInt64":"-6400000000",` + - `"oInt64Str":"-6400000000",` + - `"oUint32":32,` + - `"oUint32Str":32,` + - `"oUint64":"6400000000",` + - `"oUint64Str":"6400000000",` + - `"oSint32":-13,` + - `"oSint32Str":-13,` + - `"oSint64":"-2600000000",` + - `"oSint64Str":"-2600000000",` + - `"oFloat":3.14,` + - `"oFloatStr":3.14,` + - `"oDouble":6.02214179e+23,` + - `"oDoubleStr":6.02214179e+23,` + - `"oString":"hello \"there\"",` + - `"oBytes":"YmVlcCBib29w"` + - `}` - - simpleObjectInputPrettyJSON = `{ - "oBool": true, - "oInt32": -32, - "oInt32Str": "-32", - "oInt64": -6400000000, - "oInt64Str": "-6400000000", - "oUint32": 32, - "oUint32Str": "32", - "oUint64": 6400000000, - "oUint64Str": "6400000000", - "oSint32": -13, - "oSint32Str": "-13", - "oSint64": -2600000000, - "oSint64Str": "-2600000000", - "oFloat": 3.14, - "oFloatStr": "3.14", - "oDouble": 6.02214179e+23, - "oDoubleStr": "6.02214179e+23", - "oString": "hello \"there\"", - "oBytes": "YmVlcCBib29w" -}` - - simpleObjectOutputPrettyJSON = `{ - "oBool": true, - "oInt32": -32, - "oInt32Str": -32, - "oInt64": "-6400000000", - "oInt64Str": "-6400000000", - "oUint32": 32, - "oUint32Str": 32, - "oUint64": "6400000000", - "oUint64Str": "6400000000", - "oSint32": -13, - "oSint32Str": -13, - "oSint64": "-2600000000", - "oSint64Str": "-2600000000", - "oFloat": 3.14, - "oFloatStr": 3.14, - "oDouble": 6.02214179e+23, - "oDoubleStr": 6.02214179e+23, - "oString": "hello 
\"there\"", - "oBytes": "YmVlcCBib29w" -}` - - repeatsObject = &pb.Repeats{ - RBool: []bool{true, false, true}, - RInt32: []int32{-3, -4, -5}, - RInt64: []int64{-123456789, -987654321}, - RUint32: []uint32{1, 2, 3}, - RUint64: []uint64{6789012345, 3456789012}, - RSint32: []int32{-1, -2, -3}, - RSint64: []int64{-6789012345, -3456789012}, - RFloat: []float32{3.14, 6.28}, - RDouble: []float64{299792458 * 1e20, 6.62606957e-34}, - RString: []string{"happy", "days"}, - RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")}, - } - - repeatsObjectJSON = `{` + - `"rBool":[true,false,true],` + - `"rInt32":[-3,-4,-5],` + - `"rInt64":["-123456789","-987654321"],` + - `"rUint32":[1,2,3],` + - `"rUint64":["6789012345","3456789012"],` + - `"rSint32":[-1,-2,-3],` + - `"rSint64":["-6789012345","-3456789012"],` + - `"rFloat":[3.14,6.28],` + - `"rDouble":[2.99792458e+28,6.62606957e-34],` + - `"rString":["happy","days"],` + - `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + - `}` - - repeatsObjectPrettyJSON = `{ - "rBool": [ - true, - false, - true - ], - "rInt32": [ - -3, - -4, - -5 - ], - "rInt64": [ - "-123456789", - "-987654321" - ], - "rUint32": [ - 1, - 2, - 3 - ], - "rUint64": [ - "6789012345", - "3456789012" - ], - "rSint32": [ - -1, - -2, - -3 - ], - "rSint64": [ - "-6789012345", - "-3456789012" - ], - "rFloat": [ - 3.14, - 6.28 - ], - "rDouble": [ - 2.99792458e+28, - 6.62606957e-34 - ], - "rString": [ - "happy", - "days" - ], - "rBytes": [ - "c2tpdHRsZXM=", - "bSZtJ3M=" - ] -}` - - innerSimple = &pb.Simple{OInt32: proto.Int32(-32)} - innerSimple2 = &pb.Simple{OInt64: proto.Int64(25)} - innerRepeats = &pb.Repeats{RString: []string{"roses", "red"}} - innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}} - complexObject = &pb.Widget{ - Color: pb.Widget_GREEN.Enum(), - RColor: []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE}, - Simple: innerSimple, - RSimple: []*pb.Simple{innerSimple, innerSimple2}, - Repeats: innerRepeats, - RRepeats: 
[]*pb.Repeats{innerRepeats, innerRepeats2}, - } - - complexObjectJSON = `{"color":"GREEN",` + - `"rColor":["RED","GREEN","BLUE"],` + - `"simple":{"oInt32":-32},` + - `"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` + - `"repeats":{"rString":["roses","red"]},` + - `"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` + - `}` - - complexObjectPrettyJSON = `{ - "color": "GREEN", - "rColor": [ - "RED", - "GREEN", - "BLUE" - ], - "simple": { - "oInt32": -32 - }, - "rSimple": [ - { - "oInt32": -32 - }, - { - "oInt64": "25" - } - ], - "repeats": { - "rString": [ - "roses", - "red" - ] - }, - "rRepeats": [ - { - "rString": [ - "roses", - "red" - ] - }, - { - "rString": [ - "violets", - "blue" - ] - } - ] -}` - - colorPrettyJSON = `{ - "color": 2 -}` - - colorListPrettyJSON = `{ - "color": 1000, - "rColor": [ - "RED" - ] -}` - - nummyPrettyJSON = `{ - "nummy": { - "1": 2, - "3": 4 - } -}` - - objjyPrettyJSON = `{ - "objjy": { - "1": { - "dub": 1 - } - } -}` - realNumber = &pb.Real{Value: proto.Float64(3.14159265359)} - realNumberName = "Pi" - complexNumber = &pb.Complex{Imaginary: proto.Float64(0.5772156649)} - realNumberJSON = `{` + - `"value":3.14159265359,` + - `"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` + - `"[jsonpb.name]":"Pi"` + - `}` - - anySimple = &pb.KnownTypes{ - An: &anypb.Any{ - TypeUrl: "something.example.com/jsonpb.Simple", - Value: []byte{ - // &pb.Simple{OBool:true} - 1 << 3, 1, - }, - }, - } - anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}` - anySimplePrettyJSON = `{ - "an": { - "@type": "something.example.com/jsonpb.Simple", - "oBool": true - } -}` - - anyWellKnown = &pb.KnownTypes{ - An: &anypb.Any{ - TypeUrl: "type.googleapis.com/google.protobuf.Duration", - Value: []byte{ - // &durpb.Duration{Seconds: 1, Nanos: 212000000 } - 1 << 3, 1, // seconds - 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos - }, - }, - } - anyWellKnownJSON = 
`{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}` - anyWellKnownPrettyJSON = `{ - "an": { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } -}` - - nonFinites = &pb.NonFinites{ - FNan: proto.Float32(float32(math.NaN())), - FPinf: proto.Float32(float32(math.Inf(1))), - FNinf: proto.Float32(float32(math.Inf(-1))), - DNan: proto.Float64(float64(math.NaN())), - DPinf: proto.Float64(float64(math.Inf(1))), - DNinf: proto.Float64(float64(math.Inf(-1))), - } - nonFinitesJSON = `{` + - `"fNan":"NaN",` + - `"fPinf":"Infinity",` + - `"fNinf":"-Infinity",` + - `"dNan":"NaN",` + - `"dPinf":"Infinity",` + - `"dNinf":"-Infinity"` + - `}` -) - -func init() { - if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil { - panic(err) - } - if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil { - panic(err) - } -} - -var marshalingTests = []struct { - desc string - marshaler Marshaler - pb proto.Message - json string -}{ - {"simple flat object", marshaler, simpleObject, simpleObjectOutputJSON}, - {"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectOutputPrettyJSON}, - {"non-finite floats fields object", marshaler, nonFinites, nonFinitesJSON}, - {"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON}, - {"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON}, - {"nested message/enum flat object", marshaler, complexObject, complexObjectJSON}, - {"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON}, - {"enum-string flat object", Marshaler{}, - &pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`}, - {"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "}, - &pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON}, - {"unknown enum value object", marshalerAllOptions, - &pb.Widget{Color: 
pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON}, - {"repeated proto3 enum", Marshaler{}, - &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ - proto3pb.Message_PUNS, - proto3pb.Message_SLAPSTICK, - }}, - `{"rFunny":["PUNS","SLAPSTICK"]}`}, - {"repeated proto3 enum as int", Marshaler{EnumsAsInts: true}, - &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ - proto3pb.Message_PUNS, - proto3pb.Message_SLAPSTICK, - }}, - `{"rFunny":[1,2]}`}, - {"empty value", marshaler, &pb.Simple3{}, `{}`}, - {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`}, - {"empty repeated emitted", Marshaler{EmitDefaults: true}, &pb.SimpleSlice3{}, `{"slices":[]}`}, - {"empty map emitted", Marshaler{EmitDefaults: true}, &pb.SimpleMap3{}, `{"stringy":{}}`}, - {"nested struct null", Marshaler{EmitDefaults: true}, &pb.SimpleNull3{}, `{"simple":null}`}, - {"map", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`}, - {"map", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON}, - {"map", marshaler, - &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}, - `{"strry":{"\"one\"":"two","three":"four"}}`}, - {"map", marshaler, - &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`}, - {"map", marshalerAllOptions, - &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, objjyPrettyJSON}, - {"map", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}}, - `{"buggy":{"1234":"yup"}}`}, - {"map", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`}, - {"map", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}}`}, - {"map", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`}, - {"map", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: 
false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"map", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"map", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"map", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`}, - {"proto2 map", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}}, - `{"mInt64Str":{"213":"cat"}}`}, - {"proto2 map", marshaler, - &pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: {OInt32: proto.Int32(1)}}}, - `{"mBoolSimple":{"true":{"oInt32":1}}}`}, - {"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`}, - {"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`}, - {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)}, - `{"o_int32":4}`}, - {"proto2 extension", marshaler, realNumber, realNumberJSON}, - {"Any with message", marshaler, anySimple, anySimpleJSON}, - {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, - {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, - {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, - {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3s"}`}, - {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3, Nanos: 1e6}}, `{"dur":"3.001s"}`}, - {"Duration beyond float64 precision", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`}, - {"negative Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: -123, Nanos: -456}}, `{"dur":"-123.000000456s"}`}, - {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ 
- Fields: map[string]*stpb.Value{ - "one": {Kind: &stpb.Value_StringValue{"loneliest number"}}, - "two": {Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}, - }, - }}, `{"st":{"one":"loneliest number","two":null}}`}, - {"empty ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{}}, `{"lv":[]}`}, - {"basic ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{ - {Kind: &stpb.Value_StringValue{"x"}}, - {Kind: &stpb.Value_NullValue{}}, - {Kind: &stpb.Value_NumberValue{3}}, - {Kind: &stpb.Value_BoolValue{true}}, - }}}, `{"lv":["x",null,3,true]}`}, - {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`}, - {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 0}}, `{"ts":"2014-05-13T16:53:20Z"}`}, - {"number Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}, `{"val":1}`}, - {"null Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}, `{"val":null}`}, - {"string number value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}, `{"val":"9223372036854775807"}`}, - {"list of lists Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{ - Kind: &stpb.Value_ListValue{&stpb.ListValue{ - Values: []*stpb.Value{ - {Kind: &stpb.Value_StringValue{"x"}}, - {Kind: &stpb.Value_ListValue{&stpb.ListValue{ - Values: []*stpb.Value{ - {Kind: &stpb.Value_ListValue{&stpb.ListValue{ - Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}}, - }}}, - {Kind: &stpb.Value_StringValue{"z"}}, - }, - }}}, - }, - }}, - }}, `{"val":["x",[["y"],"z"]]}`}, - - {"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`}, - {"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`}, - {"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, 
`{"i64":"-3"}`}, - {"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`}, - {"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`}, - {"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`}, - {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`}, - {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`}, - {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, - - {"required", marshaler, &pb.MsgWithRequired{Str: proto.String("hello")}, `{"str":"hello"}`}, - {"required bytes", marshaler, &pb.MsgWithRequiredBytes{Byts: []byte{}}, `{"byts":""}`}, -} - -func TestMarshaling(t *testing.T) { - for _, tt := range marshalingTests { - json, err := tt.marshaler.MarshalToString(tt.pb) - if err != nil { - t.Errorf("%s: marshaling error: %v", tt.desc, err) - } else if tt.json != json { - t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json) - } - } -} - -func TestMarshalingNil(t *testing.T) { - var msg *pb.Simple - m := &Marshaler{} - if _, err := m.MarshalToString(msg); err == nil { - t.Errorf("mashaling nil returned no error") - } -} - -func TestMarshalIllegalTime(t *testing.T) { - tests := []struct { - pb proto.Message - fail bool - }{ - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: 0}}, false}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: 0}}, false}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: -1}}, true}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: 1}}, true}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: 1000000000}}, true}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: -1000000000}}, true}, - {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: 1}}, false}, - {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: -1}}, true}, - {&pb.KnownTypes{Ts: 
&tspb.Timestamp{Seconds: 1, Nanos: 1000000000}}, true}, - } - for _, tt := range tests { - _, err := marshaler.MarshalToString(tt.pb) - if err == nil && tt.fail { - t.Errorf("marshaler.MarshalToString(%v) = _, ; want _, ", tt.pb) - } - if err != nil && !tt.fail { - t.Errorf("marshaler.MarshalToString(%v) = _, %v; want _, ", tt.pb, err) - } - } -} - -func TestMarshalJSONPBMarshaler(t *testing.T) { - rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` - msg := dynamicMessage{RawJson: rawJson} - str, err := new(Marshaler).MarshalToString(&msg) - if err != nil { - t.Errorf("an unexpected error occurred when marshalling JSONPBMarshaler: %v", err) - } - if str != rawJson { - t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, rawJson) - } -} - -func TestMarshalAnyJSONPBMarshaler(t *testing.T) { - msg := dynamicMessage{RawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`} - a, err := ptypes.MarshalAny(&msg) - if err != nil { - t.Errorf("an unexpected error occurred when marshalling to Any: %v", err) - } - str, err := new(Marshaler).MarshalToString(a) - if err != nil { - t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err) - } - // after custom marshaling, it's round-tripped through JSON decoding/encoding already, - // so the keys are sorted, whitespace is compacted, and "@type" key has been added - expected := `{"@type":"type.googleapis.com/` + dynamicMessageName + `","baz":[0,1,2,3],"foo":"bar"}` - if str != expected { - t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected) - } -} - -func TestMarshalWithCustomValidation(t *testing.T) { - msg := dynamicMessage{RawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`, Dummy: &dynamicMessage{}} - - js, err := new(Marshaler).MarshalToString(&msg) - if err != nil { - t.Errorf("an unexpected error occurred when marshalling to json: %v", err) - } - err = Unmarshal(strings.NewReader(js), &msg) - if err != nil { - t.Errorf("an unexpected error occurred 
when unmarshalling from json: %v", err) - } -} - -// Test marshaling message containing unset required fields should produce error. -func TestMarshalUnsetRequiredFields(t *testing.T) { - msgExt := &pb.Real{} - proto.SetExtension(msgExt, pb.E_Extm, &pb.MsgWithRequired{}) - - tests := []struct { - desc string - marshaler *Marshaler - pb proto.Message - }{ - { - desc: "direct required field", - marshaler: &Marshaler{}, - pb: &pb.MsgWithRequired{}, - }, - { - desc: "direct required field + emit defaults", - marshaler: &Marshaler{EmitDefaults: true}, - pb: &pb.MsgWithRequired{}, - }, - { - desc: "indirect required field", - marshaler: &Marshaler{}, - pb: &pb.MsgWithIndirectRequired{Subm: &pb.MsgWithRequired{}}, - }, - { - desc: "indirect required field + emit defaults", - marshaler: &Marshaler{EmitDefaults: true}, - pb: &pb.MsgWithIndirectRequired{Subm: &pb.MsgWithRequired{}}, - }, - { - desc: "direct required wkt field", - marshaler: &Marshaler{}, - pb: &pb.MsgWithRequiredWKT{}, - }, - { - desc: "direct required wkt field + emit defaults", - marshaler: &Marshaler{EmitDefaults: true}, - pb: &pb.MsgWithRequiredWKT{}, - }, - { - desc: "direct required bytes field", - marshaler: &Marshaler{}, - pb: &pb.MsgWithRequiredBytes{}, - }, - { - desc: "required in map value", - marshaler: &Marshaler{}, - pb: &pb.MsgWithIndirectRequired{ - MapField: map[string]*pb.MsgWithRequired{ - "key": {}, - }, - }, - }, - { - desc: "required in repeated item", - marshaler: &Marshaler{}, - pb: &pb.MsgWithIndirectRequired{ - SliceField: []*pb.MsgWithRequired{ - {Str: proto.String("hello")}, - {}, - }, - }, - }, - { - desc: "required inside oneof", - marshaler: &Marshaler{}, - pb: &pb.MsgWithOneof{ - Union: &pb.MsgWithOneof_MsgWithRequired{&pb.MsgWithRequired{}}, - }, - }, - { - desc: "required inside extension", - marshaler: &Marshaler{}, - pb: msgExt, - }, - } - - for _, tc := range tests { - if _, err := tc.marshaler.MarshalToString(tc.pb); err == nil { - t.Errorf("%s: expecting error in 
marshaling with unset required fields %+v", tc.desc, tc.pb) - } - } -} - -var unmarshalingTests = []struct { - desc string - unmarshaler Unmarshaler - json string - pb proto.Message -}{ - {"simple flat object", Unmarshaler{}, simpleObjectInputJSON, simpleObject}, - {"simple pretty object", Unmarshaler{}, simpleObjectInputPrettyJSON, simpleObject}, - {"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject}, - {"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject}, - {"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject}, - {"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject}, - {"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, - {"enum-value object", Unmarshaler{}, "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, - {"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)}, - {"proto3 enum string", Unmarshaler{}, `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"unknown enum value object", - Unmarshaler{}, - "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}", - &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}}, - {"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`, - &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ - proto3pb.Message_PUNS, - proto3pb.Message_SLAPSTICK, - }}}, - {"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`, - &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ - proto3pb.Message_PUNS, - proto3pb.Message_SLAPSTICK, - }}}, - {"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`, - &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ - 
proto3pb.Message_PUNS, - proto3pb.Message_SLAPSTICK, - }}}, - {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, - {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, - {"NaN", Unmarshaler{}, `{"oDouble":"NaN"}`, &pb.Simple{ODouble: proto.Float64(math.NaN())}}, - {"Inf", Unmarshaler{}, `{"oFloat":"Infinity"}`, &pb.Simple{OFloat: proto.Float32(float32(math.Inf(1)))}}, - {"-Inf", Unmarshaler{}, `{"oDouble":"-Infinity"}`, &pb.Simple{ODouble: proto.Float64(math.Inf(-1))}}, - {"map", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, - {"map", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, - {"map", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}}, - {"proto2 extension", Unmarshaler{}, realNumberJSON, realNumber}, - {"Any with message", Unmarshaler{}, anySimpleJSON, anySimple}, - {"Any with message and indent", Unmarshaler{}, anySimplePrettyJSON, anySimple}, - {"Any with WKT", Unmarshaler{}, anyWellKnownJSON, anyWellKnown}, - {"Any with WKT and indent", Unmarshaler{}, anyWellKnownPrettyJSON, anyWellKnown}, - {"map", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, - {"map", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, - {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, - {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, - {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, - {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: 
&pb.MsgWithOneof_HomeAddress{"Australia"}}}, - {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, - {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, - {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, - - {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, - {"Duration", Unmarshaler{}, `{"dur":"4s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 4}}}, - {"Duration with unicode", Unmarshaler{}, `{"dur": "3\u0073"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, - {"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}}, - {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, - {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 0}}}, - {"Timestamp with unicode", Unmarshaler{}, `{"ts": "2014-05-13T16:53:20\u005a"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 0}}}, - {"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}}, - {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}}, - {"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}}, - {"null Struct", Unmarshaler{}, `{"st": null}`, &pb.KnownTypes{St: nil}}, - {"empty Struct", Unmarshaler{}, `{"st": {}}`, &pb.KnownTypes{St: &stpb.Struct{}}}, - {"basic Struct", Unmarshaler{}, `{"st": {"a": "x", "b": null, "c": 3, "d": true}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{ - "a": {Kind: &stpb.Value_StringValue{"x"}}, - "b": {Kind: &stpb.Value_NullValue{}}, - "c": {Kind: &stpb.Value_NumberValue{3}}, - "d": {Kind: 
&stpb.Value_BoolValue{true}}, - }}}}, - {"nested Struct", Unmarshaler{}, `{"st": {"a": {"b": 1, "c": [{"d": true}, "f"]}}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{ - "a": {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{ - "b": {Kind: &stpb.Value_NumberValue{1}}, - "c": {Kind: &stpb.Value_ListValue{&stpb.ListValue{Values: []*stpb.Value{ - {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{"d": {Kind: &stpb.Value_BoolValue{true}}}}}}, - {Kind: &stpb.Value_StringValue{"f"}}, - }}}}, - }}}}, - }}}}, - {"null ListValue", Unmarshaler{}, `{"lv": null}`, &pb.KnownTypes{Lv: nil}}, - {"empty ListValue", Unmarshaler{}, `{"lv": []}`, &pb.KnownTypes{Lv: &stpb.ListValue{}}}, - {"basic ListValue", Unmarshaler{}, `{"lv": ["x", null, 3, true]}`, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{ - {Kind: &stpb.Value_StringValue{"x"}}, - {Kind: &stpb.Value_NullValue{}}, - {Kind: &stpb.Value_NumberValue{3}}, - {Kind: &stpb.Value_BoolValue{true}}, - }}}}, - {"number Value", Unmarshaler{}, `{"val":1}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}}, - {"null Value", Unmarshaler{}, `{"val":null}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}}, - {"bool Value", Unmarshaler{}, `{"val":true}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_BoolValue{true}}}}, - {"string Value", Unmarshaler{}, `{"val":"x"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"x"}}}}, - {"string number value", Unmarshaler{}, `{"val":"9223372036854775807"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}}, - {"list of lists Value", Unmarshaler{}, `{"val":["x", [["y"], "z"]]}`, &pb.KnownTypes{Val: &stpb.Value{ - Kind: &stpb.Value_ListValue{&stpb.ListValue{ - Values: []*stpb.Value{ - {Kind: &stpb.Value_StringValue{"x"}}, - {Kind: &stpb.Value_ListValue{&stpb.ListValue{ - Values: []*stpb.Value{ - {Kind: 
&stpb.Value_ListValue{&stpb.ListValue{ - Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}}, - }}}, - {Kind: &stpb.Value_StringValue{"z"}}, - }, - }}}, - }, - }}}}}, - - {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, - {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}}, - {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, - {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, - {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, - {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, - {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, - {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, - {"StringValue containing escaped character", Unmarshaler{}, `{"str":"a\/b"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "a/b"}}}, - {"StructValue containing StringValue's", Unmarshaler{}, `{"escaped": "a\/b", "unicode": "\u00004E16\u0000754C"}`, - &stpb.Struct{ - Fields: map[string]*stpb.Value{ - "escaped": {Kind: &stpb.Value_StringValue{"a/b"}}, - "unicode": {Kind: &stpb.Value_StringValue{"\u00004E16\u0000754C"}}, - }, - }}, - {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, - - // Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct. 
- {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: nil}}, - {"null FloatValue", Unmarshaler{}, `{"flt":null}`, &pb.KnownTypes{Flt: nil}}, - {"null Int64Value", Unmarshaler{}, `{"i64":null}`, &pb.KnownTypes{I64: nil}}, - {"null UInt64Value", Unmarshaler{}, `{"u64":null}`, &pb.KnownTypes{U64: nil}}, - {"null Int32Value", Unmarshaler{}, `{"i32":null}`, &pb.KnownTypes{I32: nil}}, - {"null UInt32Value", Unmarshaler{}, `{"u32":null}`, &pb.KnownTypes{U32: nil}}, - {"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}}, - {"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}}, - {"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}}, - - {"required", Unmarshaler{}, `{"str":"hello"}`, &pb.MsgWithRequired{Str: proto.String("hello")}}, - {"required bytes", Unmarshaler{}, `{"byts": []}`, &pb.MsgWithRequiredBytes{Byts: []byte{}}}, -} - -func TestUnmarshaling(t *testing.T) { - for _, tt := range unmarshalingTests { - // Make a new instance of the type of our expected object. - p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) - - err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p) - if err != nil { - t.Errorf("unmarshalling %s: %v", tt.desc, err) - continue - } - - // For easier diffs, compare text strings of the protos. 
- exp := proto.MarshalTextString(tt.pb) - act := proto.MarshalTextString(p) - if string(exp) != string(act) { - t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) - } - } -} - -func TestUnmarshalNullArray(t *testing.T) { - var repeats pb.Repeats - if err := UnmarshalString(`{"rBool":null}`, &repeats); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(repeats, pb.Repeats{}) { - t.Errorf("got non-nil fields in [%#v]", repeats) - } -} - -func TestUnmarshalNullObject(t *testing.T) { - var maps pb.Maps - if err := UnmarshalString(`{"mInt64Str":null}`, &maps); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(maps, pb.Maps{}) { - t.Errorf("got non-nil fields in [%#v]", maps) - } -} - -func TestUnmarshalNext(t *testing.T) { - // We only need to check against a few, not all of them. - tests := unmarshalingTests[:5] - - // Create a buffer with many concatenated JSON objects. - var b bytes.Buffer - for _, tt := range tests { - b.WriteString(tt.json) - } - - dec := json.NewDecoder(&b) - for _, tt := range tests { - // Make a new instance of the type of our expected object. - p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) - - err := tt.unmarshaler.UnmarshalNext(dec, p) - if err != nil { - t.Errorf("%s: %v", tt.desc, err) - continue - } - - // For easier diffs, compare text strings of the protos. 
- exp := proto.MarshalTextString(tt.pb) - act := proto.MarshalTextString(p) - if string(exp) != string(act) { - t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) - } - } - - p := &pb.Simple{} - err := new(Unmarshaler).UnmarshalNext(dec, p) - if err != io.EOF { - t.Errorf("eof: got %v, expected io.EOF", err) - } -} - -var unmarshalingShouldError = []struct { - desc string - in string - pb proto.Message -}{ - {"a value", "666", new(pb.Simple)}, - {"gibberish", "{adskja123;l23=-=", new(pb.Simple)}, - {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)}, - {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)}, - {"Duration containing invalid character", `{"dur": "3\U0073"}`, &pb.KnownTypes{}}, - {"Timestamp containing invalid character", `{"ts": "2014-05-13T16:53:20\U005a"}`, &pb.KnownTypes{}}, - {"StringValue containing invalid character", `{"str": "\U00004E16\U0000754C"}`, &pb.KnownTypes{}}, - {"StructValue containing invalid character", `{"str": "\U00004E16\U0000754C"}`, &stpb.Struct{}}, - {"repeated proto3 enum with non array input", `{"rFunny":"PUNS"}`, &proto3pb.Message{RFunny: []proto3pb.Message_Humour{}}}, -} - -func TestUnmarshalingBadInput(t *testing.T) { - for _, tt := range unmarshalingShouldError { - err := UnmarshalString(tt.in, tt.pb) - if err == nil { - t.Errorf("an error was expected when parsing %q instead of an object", tt.desc) - } - } -} - -type funcResolver func(turl string) (proto.Message, error) - -func (fn funcResolver) Resolve(turl string) (proto.Message, error) { - return fn(turl) -} - -func TestAnyWithCustomResolver(t *testing.T) { - var resolvedTypeUrls []string - resolver := funcResolver(func(turl string) (proto.Message, error) { - resolvedTypeUrls = append(resolvedTypeUrls, turl) - return new(pb.Simple), nil - }) - msg := &pb.Simple{ - OBytes: []byte{1, 2, 3, 4}, - OBool: proto.Bool(true), - OString: proto.String("foobar"), - OInt64: proto.Int64(1020304), - } - msgBytes, err := proto.Marshal(msg) - if err != nil { - 
t.Errorf("an unexpected error occurred when marshaling message: %v", err) - } - // make an Any with a type URL that won't resolve w/out custom resolver - any := &anypb.Any{ - TypeUrl: "https://foobar.com/some.random.MessageKind", - Value: msgBytes, - } - - m := Marshaler{AnyResolver: resolver} - js, err := m.MarshalToString(any) - if err != nil { - t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err) - } - if len(resolvedTypeUrls) != 1 { - t.Errorf("custom resolver was not invoked during marshaling") - } else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" { - t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], "https://foobar.com/some.random.MessageKind") - } - wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}` - if js != wanted { - t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted) - } - - u := Unmarshaler{AnyResolver: resolver} - roundTrip := &anypb.Any{} - err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip) - if err != nil { - t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err) - } - if len(resolvedTypeUrls) != 2 { - t.Errorf("custom resolver was not invoked during marshaling") - } else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" { - t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind") - } - if !proto.Equal(any, roundTrip) { - t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any) - } -} - -func TestUnmarshalJSONPBUnmarshaler(t *testing.T) { - rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` - var msg dynamicMessage - if err := Unmarshal(strings.NewReader(rawJson), &msg); err != nil { - t.Errorf("an unexpected error occurred when parsing into 
JSONPBUnmarshaler: %v", err) - } - if msg.RawJson != rawJson { - t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", msg.RawJson, rawJson) - } -} - -func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) { - rawJson := `{"stringField":null}` - var ptrFieldMsg ptrFieldMessage - if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil { - t.Errorf("unmarshal error: %v", err) - } - - want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}} - if !proto.Equal(&ptrFieldMsg, &want) { - t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want) - } -} - -func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) { - rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }` - var got anypb.Any - if err := Unmarshal(strings.NewReader(rawJson), &got); err != nil { - t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) - } - - dm := &dynamicMessage{RawJson: `{"baz":[0,1,2,3],"foo":"bar"}`} - var want anypb.Any - if b, err := proto.Marshal(dm); err != nil { - t.Errorf("an unexpected error occurred when marshaling message: %v", err) - } else { - want.TypeUrl = "blah.com/" + dynamicMessageName - want.Value = b - } - - if !proto.Equal(&got, &want) { - t.Errorf("message contents not set correctly after unmarshalling JSON: got %v, wanted %v", got, want) - } -} - -const ( - dynamicMessageName = "google.protobuf.jsonpb.testing.dynamicMessage" -) - -func init() { - // we register the custom type below so that we can use it in Any types - proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName) -} - -type ptrFieldMessage struct { - StringField *stringField `protobuf:"bytes,1,opt,name=stringField"` -} - -func (m *ptrFieldMessage) Reset() { -} - -func (m *ptrFieldMessage) String() string { - return m.StringField.StringValue -} - -func (m *ptrFieldMessage) ProtoMessage() { -} - -type stringField struct { - IsSet 
bool `protobuf:"varint,1,opt,name=isSet"` - StringValue string `protobuf:"bytes,2,opt,name=stringValue"` -} - -func (s *stringField) Reset() { -} - -func (s *stringField) String() string { - return s.StringValue -} - -func (s *stringField) ProtoMessage() { -} - -func (s *stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { - s.IsSet = true - s.StringValue = string(js) - return nil -} - -// dynamicMessage implements protobuf.Message but is not a normal generated message type. -// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support. -type dynamicMessage struct { - RawJson string `protobuf:"bytes,1,opt,name=rawJson"` - - // an unexported nested message is present just to ensure that it - // won't result in a panic (see issue #509) - Dummy *dynamicMessage `protobuf:"bytes,2,opt,name=dummy"` -} - -func (m *dynamicMessage) Reset() { - m.RawJson = "{}" -} - -func (m *dynamicMessage) String() string { - return m.RawJson -} - -func (m *dynamicMessage) ProtoMessage() { -} - -func (m *dynamicMessage) MarshalJSONPB(jm *Marshaler) ([]byte, error) { - return []byte(m.RawJson), nil -} - -func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { - m.RawJson = string(js) - return nil -} - -// Test unmarshaling message containing unset required fields should produce error. 
-func TestUnmarshalUnsetRequiredFields(t *testing.T) { - tests := []struct { - desc string - pb proto.Message - json string - }{ - { - desc: "direct required field missing", - pb: &pb.MsgWithRequired{}, - json: `{}`, - }, - { - desc: "direct required field set to null", - pb: &pb.MsgWithRequired{}, - json: `{"str": null}`, - }, - { - desc: "indirect required field missing", - pb: &pb.MsgWithIndirectRequired{}, - json: `{"subm": {}}`, - }, - { - desc: "indirect required field set to null", - pb: &pb.MsgWithIndirectRequired{}, - json: `{"subm": {"str": null}}`, - }, - { - desc: "direct required bytes field missing", - pb: &pb.MsgWithRequiredBytes{}, - json: `{}`, - }, - { - desc: "direct required bytes field set to null", - pb: &pb.MsgWithRequiredBytes{}, - json: `{"byts": null}`, - }, - { - desc: "direct required wkt field missing", - pb: &pb.MsgWithRequiredWKT{}, - json: `{}`, - }, - { - desc: "direct required wkt field set to null", - pb: &pb.MsgWithRequiredWKT{}, - json: `{"str": null}`, - }, - { - desc: "any containing message with required field set to null", - pb: &pb.KnownTypes{}, - json: `{"an": {"@type": "example.com/jsonpb.MsgWithRequired", "str": null}}`, - }, - { - desc: "any containing message with missing required field", - pb: &pb.KnownTypes{}, - json: `{"an": {"@type": "example.com/jsonpb.MsgWithRequired"}}`, - }, - { - desc: "missing required in map value", - pb: &pb.MsgWithIndirectRequired{}, - json: `{"map_field": {"a": {}, "b": {"str": "hi"}}}`, - }, - { - desc: "required in map value set to null", - pb: &pb.MsgWithIndirectRequired{}, - json: `{"map_field": {"a": {"str": "hello"}, "b": {"str": null}}}`, - }, - { - desc: "missing required in slice item", - pb: &pb.MsgWithIndirectRequired{}, - json: `{"slice_field": [{}, {"str": "hi"}]}`, - }, - { - desc: "required in slice item set to null", - pb: &pb.MsgWithIndirectRequired{}, - json: `{"slice_field": [{"str": "hello"}, {"str": null}]}`, - }, - { - desc: "required inside oneof missing", - pb: 
&pb.MsgWithOneof{}, - json: `{"msgWithRequired": {}}`, - }, - { - desc: "required inside oneof set to null", - pb: &pb.MsgWithOneof{}, - json: `{"msgWithRequired": {"str": null}}`, - }, - { - desc: "required field in extension missing", - pb: &pb.Real{}, - json: `{"[jsonpb.extm]":{}}`, - }, - { - desc: "required field in extension set to null", - pb: &pb.Real{}, - json: `{"[jsonpb.extm]":{"str": null}}`, - }, - } - - for _, tc := range tests { - if err := UnmarshalString(tc.json, tc.pb); err == nil { - t.Errorf("%s: expecting error in unmarshaling with unset required fields %s", tc.desc, tc.json) - } - } -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go deleted file mode 100644 index 0555e44f..00000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go +++ /dev/null @@ -1,368 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: more_test_objects.proto - -package jsonpb - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Numeral int32 - -const ( - Numeral_UNKNOWN Numeral = 0 - Numeral_ARABIC Numeral = 1 - Numeral_ROMAN Numeral = 2 -) - -var Numeral_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ARABIC", - 2: "ROMAN", -} -var Numeral_value = map[string]int32{ - "UNKNOWN": 0, - "ARABIC": 1, - "ROMAN": 2, -} - -func (x Numeral) String() string { - return proto.EnumName(Numeral_name, int32(x)) -} -func (Numeral) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{0} -} - -type Simple3 struct { - Dub float64 `protobuf:"fixed64,1,opt,name=dub,proto3" json:"dub,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Simple3) Reset() { *m = Simple3{} } -func (m *Simple3) String() string { return proto.CompactTextString(m) } -func (*Simple3) ProtoMessage() {} -func (*Simple3) Descriptor() ([]byte, []int) { - return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{0} -} -func (m *Simple3) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Simple3.Unmarshal(m, b) -} -func (m *Simple3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Simple3.Marshal(b, m, deterministic) -} -func (dst *Simple3) XXX_Merge(src proto.Message) { - xxx_messageInfo_Simple3.Merge(dst, src) -} -func (m *Simple3) XXX_Size() int { - return xxx_messageInfo_Simple3.Size(m) -} -func (m *Simple3) XXX_DiscardUnknown() { - xxx_messageInfo_Simple3.DiscardUnknown(m) -} - -var xxx_messageInfo_Simple3 proto.InternalMessageInfo - -func (m *Simple3) GetDub() float64 { - if m != nil { - return m.Dub - } - return 0 -} - -type SimpleSlice3 struct { - Slices []string `protobuf:"bytes,1,rep,name=slices,proto3" json:"slices,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SimpleSlice3) 
Reset() { *m = SimpleSlice3{} } -func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } -func (*SimpleSlice3) ProtoMessage() {} -func (*SimpleSlice3) Descriptor() ([]byte, []int) { - return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{1} -} -func (m *SimpleSlice3) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SimpleSlice3.Unmarshal(m, b) -} -func (m *SimpleSlice3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SimpleSlice3.Marshal(b, m, deterministic) -} -func (dst *SimpleSlice3) XXX_Merge(src proto.Message) { - xxx_messageInfo_SimpleSlice3.Merge(dst, src) -} -func (m *SimpleSlice3) XXX_Size() int { - return xxx_messageInfo_SimpleSlice3.Size(m) -} -func (m *SimpleSlice3) XXX_DiscardUnknown() { - xxx_messageInfo_SimpleSlice3.DiscardUnknown(m) -} - -var xxx_messageInfo_SimpleSlice3 proto.InternalMessageInfo - -func (m *SimpleSlice3) GetSlices() []string { - if m != nil { - return m.Slices - } - return nil -} - -type SimpleMap3 struct { - Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy,proto3" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } -func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } -func (*SimpleMap3) ProtoMessage() {} -func (*SimpleMap3) Descriptor() ([]byte, []int) { - return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{2} -} -func (m *SimpleMap3) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SimpleMap3.Unmarshal(m, b) -} -func (m *SimpleMap3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SimpleMap3.Marshal(b, m, deterministic) -} -func (dst *SimpleMap3) XXX_Merge(src proto.Message) { - xxx_messageInfo_SimpleMap3.Merge(dst, src) -} -func (m 
*SimpleMap3) XXX_Size() int { - return xxx_messageInfo_SimpleMap3.Size(m) -} -func (m *SimpleMap3) XXX_DiscardUnknown() { - xxx_messageInfo_SimpleMap3.DiscardUnknown(m) -} - -var xxx_messageInfo_SimpleMap3 proto.InternalMessageInfo - -func (m *SimpleMap3) GetStringy() map[string]string { - if m != nil { - return m.Stringy - } - return nil -} - -type SimpleNull3 struct { - Simple *Simple3 `protobuf:"bytes,1,opt,name=simple,proto3" json:"simple,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } -func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } -func (*SimpleNull3) ProtoMessage() {} -func (*SimpleNull3) Descriptor() ([]byte, []int) { - return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{3} -} -func (m *SimpleNull3) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SimpleNull3.Unmarshal(m, b) -} -func (m *SimpleNull3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SimpleNull3.Marshal(b, m, deterministic) -} -func (dst *SimpleNull3) XXX_Merge(src proto.Message) { - xxx_messageInfo_SimpleNull3.Merge(dst, src) -} -func (m *SimpleNull3) XXX_Size() int { - return xxx_messageInfo_SimpleNull3.Size(m) -} -func (m *SimpleNull3) XXX_DiscardUnknown() { - xxx_messageInfo_SimpleNull3.DiscardUnknown(m) -} - -var xxx_messageInfo_SimpleNull3 proto.InternalMessageInfo - -func (m *SimpleNull3) GetSimple() *Simple3 { - if m != nil { - return m.Simple - } - return nil -} - -type Mappy struct { - Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy,proto3" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - Strry map[string]string `protobuf:"bytes,2,rep,name=strry,proto3" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Objjy map[int32]*Simple3 
`protobuf:"bytes,3,rep,name=objjy,proto3" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy,proto3" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly,proto3" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy,proto3" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=jsonpb.Numeral"` - S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly,proto3" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly,proto3" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly,proto3" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly,proto3" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Mappy) Reset() { *m = Mappy{} } -func (m *Mappy) String() string { return proto.CompactTextString(m) } -func (*Mappy) ProtoMessage() {} -func (*Mappy) Descriptor() ([]byte, []int) { - return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{4} -} -func (m *Mappy) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Mappy.Unmarshal(m, b) -} -func (m *Mappy) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_Mappy.Marshal(b, m, deterministic) -} -func (dst *Mappy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mappy.Merge(dst, src) -} -func (m *Mappy) XXX_Size() int { - return xxx_messageInfo_Mappy.Size(m) -} -func (m *Mappy) XXX_DiscardUnknown() { - xxx_messageInfo_Mappy.DiscardUnknown(m) -} - -var xxx_messageInfo_Mappy proto.InternalMessageInfo - -func (m *Mappy) GetNummy() map[int64]int32 { - if m != nil { - return m.Nummy - } - return nil -} - -func (m *Mappy) GetStrry() map[string]string { - if m != nil { - return m.Strry - } - return nil -} - -func (m *Mappy) GetObjjy() map[int32]*Simple3 { - if m != nil { - return m.Objjy - } - return nil -} - -func (m *Mappy) GetBuggy() map[int64]string { - if m != nil { - return m.Buggy - } - return nil -} - -func (m *Mappy) GetBooly() map[bool]bool { - if m != nil { - return m.Booly - } - return nil -} - -func (m *Mappy) GetEnumy() map[string]Numeral { - if m != nil { - return m.Enumy - } - return nil -} - -func (m *Mappy) GetS32Booly() map[int32]bool { - if m != nil { - return m.S32Booly - } - return nil -} - -func (m *Mappy) GetS64Booly() map[int64]bool { - if m != nil { - return m.S64Booly - } - return nil -} - -func (m *Mappy) GetU32Booly() map[uint32]bool { - if m != nil { - return m.U32Booly - } - return nil -} - -func (m *Mappy) GetU64Booly() map[uint64]bool { - if m != nil { - return m.U64Booly - } - return nil -} - -func init() { - proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") - proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3") - proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3") - proto.RegisterMapType((map[string]string)(nil), "jsonpb.SimpleMap3.StringyEntry") - proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3") - proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") - proto.RegisterMapType((map[bool]bool)(nil), "jsonpb.Mappy.BoolyEntry") - proto.RegisterMapType((map[int64]string)(nil), "jsonpb.Mappy.BuggyEntry") 
- proto.RegisterMapType((map[string]Numeral)(nil), "jsonpb.Mappy.EnumyEntry") - proto.RegisterMapType((map[int64]int32)(nil), "jsonpb.Mappy.NummyEntry") - proto.RegisterMapType((map[int32]*Simple3)(nil), "jsonpb.Mappy.ObjjyEntry") - proto.RegisterMapType((map[int32]bool)(nil), "jsonpb.Mappy.S32boolyEntry") - proto.RegisterMapType((map[int64]bool)(nil), "jsonpb.Mappy.S64boolyEntry") - proto.RegisterMapType((map[string]string)(nil), "jsonpb.Mappy.StrryEntry") - proto.RegisterMapType((map[uint32]bool)(nil), "jsonpb.Mappy.U32boolyEntry") - proto.RegisterMapType((map[uint64]bool)(nil), "jsonpb.Mappy.U64boolyEntry") - proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) -} - -func init() { - proto.RegisterFile("more_test_objects.proto", fileDescriptor_more_test_objects_bef0d79b901f4c4a) -} - -var fileDescriptor_more_test_objects_bef0d79b901f4c4a = []byte{ - // 526 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c, - 0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0, - 0xad, 0x0c, 0xe6, 0x8b, 0x78, 0x74, 0x5d, 0x77, 0x95, 0x8e, 0x5e, 0x94, 0x11, 0x07, 0x1c, 0xc2, - 0x2e, 0x4b, 0xdc, 0x99, 0x90, 0xcc, 0x5f, 0xd8, 0xd6, 0xc0, 0xd7, 0xfb, 0xbb, 0x07, 0xe3, 0x48, - 0x72, 0x2d, 0x07, 0x85, 0x6c, 0x77, 0x52, 0x7e, 0xcf, 0xe3, 0x73, 0x24, 0x1d, 0x02, 0x2f, 0xd3, - 0xbc, 0x8c, 0x1f, 0xea, 0xb8, 0xaa, 0x1f, 0xf2, 0x68, 0x17, 0x3f, 0xd6, 0x95, 0x57, 0x94, 0x79, - 0x9d, 0x13, 0x63, 0x57, 0xe5, 0x59, 0x11, 0xb9, 0xe7, 0x60, 0x2e, 0xb7, 0x69, 0x91, 0xc4, 0x3e, - 0x19, 0xc3, 0xf0, 0x3b, 0x8d, 0x1c, 0xed, 0x42, 0xbb, 0xd4, 0x42, 0x5c, 0xba, 0x6f, 0xe0, 0x94, - 0x87, 0xcb, 0x64, 0xfb, 0x18, 0xfb, 0xe4, 0x05, 0x18, 0x15, 0xae, 0x2a, 0x47, 0xbb, 0x18, 0x5e, - 0xda, 0xa1, 0xd8, 0xb9, 0xbf, 0x34, 0x00, 0x0e, 0xce, 0xd7, 0x85, 0x4f, 0x3e, 0x81, 0x59, 0xd5, - 0xe5, 0x36, 0xdb, 0x34, 0x8c, 0x1b, 0x4d, 0x5f, 0x79, 0xbc, 0x9a, 0xd7, 0x41, 0xde, 0x92, 
0x13, - 0x77, 0x59, 0x5d, 0x36, 0x61, 0xcb, 0x4f, 0x6e, 0xe0, 0x54, 0x0e, 0xb0, 0xa7, 0x1f, 0x71, 0xc3, - 0x7a, 0xb2, 0x43, 0x5c, 0x92, 0xe7, 0xa0, 0xff, 0x5c, 0x27, 0x34, 0x76, 0x06, 0xec, 0x37, 0xbe, - 0xb9, 0x19, 0x5c, 0x6b, 0xee, 0x15, 0x8c, 0xf8, 0xf7, 0x03, 0x9a, 0x24, 0x3e, 0x79, 0x0b, 0x46, - 0xc5, 0xb6, 0xcc, 0x1e, 0x4d, 0x9f, 0xf5, 0x9b, 0xf0, 0x43, 0x11, 0xbb, 0xbf, 0x2d, 0xd0, 0xe7, - 0xeb, 0xa2, 0x68, 0x88, 0x07, 0x7a, 0x46, 0xd3, 0xb4, 0x6d, 0xdb, 0x69, 0x0d, 0x96, 0x7a, 0x01, - 0x46, 0xbc, 0x5f, 0x8e, 0x21, 0x5f, 0xd5, 0x65, 0xd9, 0x38, 0x03, 0x15, 0xbf, 0xc4, 0x48, 0xf0, - 0x0c, 0x43, 0x3e, 0x8f, 0x76, 0xbb, 0xc6, 0x19, 0xaa, 0xf8, 0x05, 0x46, 0x82, 0x67, 0x18, 0xf2, - 0x11, 0xdd, 0x6c, 0x1a, 0xe7, 0x44, 0xc5, 0xdf, 0x62, 0x24, 0x78, 0x86, 0x31, 0x3e, 0xcf, 0x93, - 0xc6, 0xd1, 0x95, 0x3c, 0x46, 0x2d, 0x8f, 0x6b, 0xe4, 0xe3, 0x8c, 0xa6, 0x8d, 0x63, 0xa8, 0xf8, - 0x3b, 0x8c, 0x04, 0xcf, 0x30, 0xf2, 0x11, 0xac, 0xca, 0x9f, 0xf2, 0x12, 0x26, 0x53, 0xce, 0xf7, - 0x8e, 0x2c, 0x52, 0x6e, 0x3d, 0xc1, 0x4c, 0xbc, 0xfa, 0xc0, 0x45, 0x4b, 0x29, 0x8a, 0xb4, 0x15, - 0xc5, 0x16, 0x45, 0xda, 0x56, 0xb4, 0x55, 0xe2, 0xaa, 0x5f, 0x91, 0x4a, 0x15, 0x69, 0x5b, 0x11, - 0x94, 0x62, 0xbf, 0x62, 0x0b, 0x4f, 0xae, 0x01, 0xba, 0x87, 0x96, 0xe7, 0x6f, 0xa8, 0x98, 0x3f, - 0x5d, 0x9a, 0x3f, 0x34, 0xbb, 0x27, 0xff, 0x97, 0xc9, 0x9d, 0xdc, 0x03, 0x74, 0x8f, 0x2f, 0x9b, - 0x3a, 0x37, 0x5f, 0xcb, 0xa6, 0x62, 0x92, 0xfb, 0x4d, 0x74, 0x73, 0x71, 0xac, 0x7d, 0x7b, 0xdf, - 0x7c, 0xba, 0x10, 0xd9, 0xb4, 0x14, 0xa6, 0xb5, 0xd7, 0x7e, 0x37, 0x2b, 0x8a, 0x83, 0xf7, 0xda, - 0xff, 0xbf, 0x6b, 0x3f, 0xa0, 0x69, 0x5c, 0xae, 0x13, 0xf9, 0x53, 0x9f, 0xe1, 0xac, 0x37, 0x43, - 0x8a, 0xcb, 0x38, 0xdc, 0x07, 0xca, 0xf2, 0xab, 0x1e, 0x3b, 0xfe, 0xbe, 0xbc, 0x3a, 0x54, 0xf9, - 0xec, 0x6f, 0xe4, 0x43, 0x95, 0x4f, 0x8e, 0xc8, 0xef, 0xde, 0x83, 0x29, 0x6e, 0x82, 0x8c, 0xc0, - 0x5c, 0x05, 0x5f, 0x83, 0xc5, 0xb7, 0x60, 0xfc, 0x1f, 0x01, 0x30, 0x66, 0xe1, 0xec, 0xf6, 0xfe, - 0xcb, 0x58, 0x23, 0x36, 0xe8, 
0xe1, 0x62, 0x3e, 0x0b, 0xc6, 0x83, 0xc8, 0x60, 0x7f, 0xe0, 0xfe, - 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x34, 0xaf, 0xdb, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto deleted file mode 100644 index d254fa5f..00000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto +++ /dev/null @@ -1,69 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package jsonpb; - -message Simple3 { - double dub = 1; -} - -message SimpleSlice3 { - repeated string slices = 1; -} - -message SimpleMap3 { - map stringy = 1; -} - -message SimpleNull3 { - Simple3 simple = 1; -} - -enum Numeral { - UNKNOWN = 0; - ARABIC = 1; - ROMAN = 2; -} - -message Mappy { - map nummy = 1; - map strry = 2; - map objjy = 3; - map buggy = 4; - map booly = 5; - map enumy = 6; - map s32booly = 7; - map s64booly = 8; - map u32booly = 9; - map u64booly = 10; -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go deleted file mode 100644 index ab7b53f7..00000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go +++ /dev/null @@ -1,1357 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: test_objects.proto - -package jsonpb - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" -import duration "github.com/golang/protobuf/ptypes/duration" -import _struct "github.com/golang/protobuf/ptypes/struct" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" -import wrappers "github.com/golang/protobuf/ptypes/wrappers" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Widget_Color int32 - -const ( - Widget_RED Widget_Color = 0 - Widget_GREEN Widget_Color = 1 - Widget_BLUE Widget_Color = 2 -) - -var Widget_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Widget_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Widget_Color) Enum() *Widget_Color { - p := new(Widget_Color) - *p = x - return p -} -func (x Widget_Color) String() string { - return proto.EnumName(Widget_Color_name, int32(x)) -} -func (x *Widget_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color") - if err != nil { - return err - } - *x = Widget_Color(value) - return nil -} -func (Widget_Color) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{3, 0} -} - -// Test message for holding primitive types. 
-type Simple struct { - OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` - OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` - OInt32Str *int32 `protobuf:"varint,3,opt,name=o_int32_str,json=oInt32Str" json:"o_int32_str,omitempty"` - OInt64 *int64 `protobuf:"varint,4,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` - OInt64Str *int64 `protobuf:"varint,5,opt,name=o_int64_str,json=oInt64Str" json:"o_int64_str,omitempty"` - OUint32 *uint32 `protobuf:"varint,6,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` - OUint32Str *uint32 `protobuf:"varint,7,opt,name=o_uint32_str,json=oUint32Str" json:"o_uint32_str,omitempty"` - OUint64 *uint64 `protobuf:"varint,8,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` - OUint64Str *uint64 `protobuf:"varint,9,opt,name=o_uint64_str,json=oUint64Str" json:"o_uint64_str,omitempty"` - OSint32 *int32 `protobuf:"zigzag32,10,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` - OSint32Str *int32 `protobuf:"zigzag32,11,opt,name=o_sint32_str,json=oSint32Str" json:"o_sint32_str,omitempty"` - OSint64 *int64 `protobuf:"zigzag64,12,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` - OSint64Str *int64 `protobuf:"zigzag64,13,opt,name=o_sint64_str,json=oSint64Str" json:"o_sint64_str,omitempty"` - OFloat *float32 `protobuf:"fixed32,14,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` - OFloatStr *float32 `protobuf:"fixed32,15,opt,name=o_float_str,json=oFloatStr" json:"o_float_str,omitempty"` - ODouble *float64 `protobuf:"fixed64,16,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` - ODoubleStr *float64 `protobuf:"fixed64,17,opt,name=o_double_str,json=oDoubleStr" json:"o_double_str,omitempty"` - OString *string `protobuf:"bytes,18,opt,name=o_string,json=oString" json:"o_string,omitempty"` - OBytes []byte `protobuf:"bytes,19,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Simple) Reset() { *m = Simple{} } -func (m *Simple) String() string { return proto.CompactTextString(m) } -func (*Simple) ProtoMessage() {} -func (*Simple) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{0} -} -func (m *Simple) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Simple.Unmarshal(m, b) -} -func (m *Simple) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Simple.Marshal(b, m, deterministic) -} -func (dst *Simple) XXX_Merge(src proto.Message) { - xxx_messageInfo_Simple.Merge(dst, src) -} -func (m *Simple) XXX_Size() int { - return xxx_messageInfo_Simple.Size(m) -} -func (m *Simple) XXX_DiscardUnknown() { - xxx_messageInfo_Simple.DiscardUnknown(m) -} - -var xxx_messageInfo_Simple proto.InternalMessageInfo - -func (m *Simple) GetOBool() bool { - if m != nil && m.OBool != nil { - return *m.OBool - } - return false -} - -func (m *Simple) GetOInt32() int32 { - if m != nil && m.OInt32 != nil { - return *m.OInt32 - } - return 0 -} - -func (m *Simple) GetOInt32Str() int32 { - if m != nil && m.OInt32Str != nil { - return *m.OInt32Str - } - return 0 -} - -func (m *Simple) GetOInt64() int64 { - if m != nil && m.OInt64 != nil { - return *m.OInt64 - } - return 0 -} - -func (m *Simple) GetOInt64Str() int64 { - if m != nil && m.OInt64Str != nil { - return *m.OInt64Str - } - return 0 -} - -func (m *Simple) GetOUint32() uint32 { - if m != nil && m.OUint32 != nil { - return *m.OUint32 - } - return 0 -} - -func (m *Simple) GetOUint32Str() uint32 { - if m != nil && m.OUint32Str != nil { - return *m.OUint32Str - } - return 0 -} - -func (m *Simple) GetOUint64() uint64 { - if m != nil && m.OUint64 != nil { - return *m.OUint64 - } - return 0 -} - -func (m *Simple) GetOUint64Str() uint64 { - if m != nil && m.OUint64Str != nil { - return *m.OUint64Str - } - return 0 -} - -func (m *Simple) GetOSint32() int32 { - 
if m != nil && m.OSint32 != nil { - return *m.OSint32 - } - return 0 -} - -func (m *Simple) GetOSint32Str() int32 { - if m != nil && m.OSint32Str != nil { - return *m.OSint32Str - } - return 0 -} - -func (m *Simple) GetOSint64() int64 { - if m != nil && m.OSint64 != nil { - return *m.OSint64 - } - return 0 -} - -func (m *Simple) GetOSint64Str() int64 { - if m != nil && m.OSint64Str != nil { - return *m.OSint64Str - } - return 0 -} - -func (m *Simple) GetOFloat() float32 { - if m != nil && m.OFloat != nil { - return *m.OFloat - } - return 0 -} - -func (m *Simple) GetOFloatStr() float32 { - if m != nil && m.OFloatStr != nil { - return *m.OFloatStr - } - return 0 -} - -func (m *Simple) GetODouble() float64 { - if m != nil && m.ODouble != nil { - return *m.ODouble - } - return 0 -} - -func (m *Simple) GetODoubleStr() float64 { - if m != nil && m.ODoubleStr != nil { - return *m.ODoubleStr - } - return 0 -} - -func (m *Simple) GetOString() string { - if m != nil && m.OString != nil { - return *m.OString - } - return "" -} - -func (m *Simple) GetOBytes() []byte { - if m != nil { - return m.OBytes - } - return nil -} - -// Test message for holding special non-finites primitives. 
-type NonFinites struct { - FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` - FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` - FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` - DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` - DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` - DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NonFinites) Reset() { *m = NonFinites{} } -func (m *NonFinites) String() string { return proto.CompactTextString(m) } -func (*NonFinites) ProtoMessage() {} -func (*NonFinites) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{1} -} -func (m *NonFinites) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NonFinites.Unmarshal(m, b) -} -func (m *NonFinites) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NonFinites.Marshal(b, m, deterministic) -} -func (dst *NonFinites) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonFinites.Merge(dst, src) -} -func (m *NonFinites) XXX_Size() int { - return xxx_messageInfo_NonFinites.Size(m) -} -func (m *NonFinites) XXX_DiscardUnknown() { - xxx_messageInfo_NonFinites.DiscardUnknown(m) -} - -var xxx_messageInfo_NonFinites proto.InternalMessageInfo - -func (m *NonFinites) GetFNan() float32 { - if m != nil && m.FNan != nil { - return *m.FNan - } - return 0 -} - -func (m *NonFinites) GetFPinf() float32 { - if m != nil && m.FPinf != nil { - return *m.FPinf - } - return 0 -} - -func (m *NonFinites) GetFNinf() float32 { - if m != nil && m.FNinf != nil { - return *m.FNinf - } - return 0 -} - -func (m *NonFinites) GetDNan() float64 { - if m != nil && m.DNan != nil { - return 
*m.DNan - } - return 0 -} - -func (m *NonFinites) GetDPinf() float64 { - if m != nil && m.DPinf != nil { - return *m.DPinf - } - return 0 -} - -func (m *NonFinites) GetDNinf() float64 { - if m != nil && m.DNinf != nil { - return *m.DNinf - } - return 0 -} - -// Test message for holding repeated primitives. -type Repeats struct { - RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` - RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` - RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` - RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` - RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` - RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` - RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` - RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` - RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` - RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` - RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Repeats) Reset() { *m = Repeats{} } -func (m *Repeats) String() string { return proto.CompactTextString(m) } -func (*Repeats) ProtoMessage() {} -func (*Repeats) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{2} -} -func (m *Repeats) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Repeats.Unmarshal(m, b) -} -func (m *Repeats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Repeats.Marshal(b, 
m, deterministic) -} -func (dst *Repeats) XXX_Merge(src proto.Message) { - xxx_messageInfo_Repeats.Merge(dst, src) -} -func (m *Repeats) XXX_Size() int { - return xxx_messageInfo_Repeats.Size(m) -} -func (m *Repeats) XXX_DiscardUnknown() { - xxx_messageInfo_Repeats.DiscardUnknown(m) -} - -var xxx_messageInfo_Repeats proto.InternalMessageInfo - -func (m *Repeats) GetRBool() []bool { - if m != nil { - return m.RBool - } - return nil -} - -func (m *Repeats) GetRInt32() []int32 { - if m != nil { - return m.RInt32 - } - return nil -} - -func (m *Repeats) GetRInt64() []int64 { - if m != nil { - return m.RInt64 - } - return nil -} - -func (m *Repeats) GetRUint32() []uint32 { - if m != nil { - return m.RUint32 - } - return nil -} - -func (m *Repeats) GetRUint64() []uint64 { - if m != nil { - return m.RUint64 - } - return nil -} - -func (m *Repeats) GetRSint32() []int32 { - if m != nil { - return m.RSint32 - } - return nil -} - -func (m *Repeats) GetRSint64() []int64 { - if m != nil { - return m.RSint64 - } - return nil -} - -func (m *Repeats) GetRFloat() []float32 { - if m != nil { - return m.RFloat - } - return nil -} - -func (m *Repeats) GetRDouble() []float64 { - if m != nil { - return m.RDouble - } - return nil -} - -func (m *Repeats) GetRString() []string { - if m != nil { - return m.RString - } - return nil -} - -func (m *Repeats) GetRBytes() [][]byte { - if m != nil { - return m.RBytes - } - return nil -} - -// Test message for holding enums and nested messages. 
-type Widget struct { - Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` - RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` - Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` - RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` - Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` - RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Widget) Reset() { *m = Widget{} } -func (m *Widget) String() string { return proto.CompactTextString(m) } -func (*Widget) ProtoMessage() {} -func (*Widget) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{3} -} -func (m *Widget) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Widget.Unmarshal(m, b) -} -func (m *Widget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Widget.Marshal(b, m, deterministic) -} -func (dst *Widget) XXX_Merge(src proto.Message) { - xxx_messageInfo_Widget.Merge(dst, src) -} -func (m *Widget) XXX_Size() int { - return xxx_messageInfo_Widget.Size(m) -} -func (m *Widget) XXX_DiscardUnknown() { - xxx_messageInfo_Widget.DiscardUnknown(m) -} - -var xxx_messageInfo_Widget proto.InternalMessageInfo - -func (m *Widget) GetColor() Widget_Color { - if m != nil && m.Color != nil { - return *m.Color - } - return Widget_RED -} - -func (m *Widget) GetRColor() []Widget_Color { - if m != nil { - return m.RColor - } - return nil -} - -func (m *Widget) GetSimple() *Simple { - if m != nil { - return m.Simple - } - return nil -} - -func (m *Widget) GetRSimple() []*Simple { - if m != nil { - return m.RSimple - } - return nil -} - -func (m 
*Widget) GetRepeats() *Repeats { - if m != nil { - return m.Repeats - } - return nil -} - -func (m *Widget) GetRRepeats() []*Repeats { - if m != nil { - return m.RRepeats - } - return nil -} - -type Maps struct { - MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Maps) Reset() { *m = Maps{} } -func (m *Maps) String() string { return proto.CompactTextString(m) } -func (*Maps) ProtoMessage() {} -func (*Maps) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{4} -} -func (m *Maps) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Maps.Unmarshal(m, b) -} -func (m *Maps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Maps.Marshal(b, m, deterministic) -} -func (dst *Maps) XXX_Merge(src proto.Message) { - xxx_messageInfo_Maps.Merge(dst, src) -} -func (m *Maps) XXX_Size() int { - return xxx_messageInfo_Maps.Size(m) -} -func (m *Maps) XXX_DiscardUnknown() { - xxx_messageInfo_Maps.DiscardUnknown(m) -} - -var xxx_messageInfo_Maps proto.InternalMessageInfo - -func (m *Maps) GetMInt64Str() map[int64]string { - if m != nil { - return m.MInt64Str - } - return nil -} - -func (m *Maps) GetMBoolSimple() map[bool]*Simple { - if m != nil { - return m.MBoolSimple - } - return nil -} - -type MsgWithOneof struct { - // Types that are valid to be assigned to Union: - // *MsgWithOneof_Title - // *MsgWithOneof_Salary - // *MsgWithOneof_Country - // *MsgWithOneof_HomeAddress - // *MsgWithOneof_MsgWithRequired - Union isMsgWithOneof_Union 
`protobuf_oneof:"union"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } -func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } -func (*MsgWithOneof) ProtoMessage() {} -func (*MsgWithOneof) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{5} -} -func (m *MsgWithOneof) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MsgWithOneof.Unmarshal(m, b) -} -func (m *MsgWithOneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MsgWithOneof.Marshal(b, m, deterministic) -} -func (dst *MsgWithOneof) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithOneof.Merge(dst, src) -} -func (m *MsgWithOneof) XXX_Size() int { - return xxx_messageInfo_MsgWithOneof.Size(m) -} -func (m *MsgWithOneof) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithOneof.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithOneof proto.InternalMessageInfo - -type isMsgWithOneof_Union interface { - isMsgWithOneof_Union() -} - -type MsgWithOneof_Title struct { - Title string `protobuf:"bytes,1,opt,name=title,oneof"` -} - -type MsgWithOneof_Salary struct { - Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` -} - -type MsgWithOneof_Country struct { - Country string `protobuf:"bytes,3,opt,name=Country,oneof"` -} - -type MsgWithOneof_HomeAddress struct { - HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` -} - -type MsgWithOneof_MsgWithRequired struct { - MsgWithRequired *MsgWithRequired `protobuf:"bytes,5,opt,name=msg_with_required,json=msgWithRequired,oneof"` -} - -func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} - -func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} - -func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} - -func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} - -func (*MsgWithOneof_MsgWithRequired) 
isMsgWithOneof_Union() {} - -func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *MsgWithOneof) GetTitle() string { - if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok { - return x.Title - } - return "" -} - -func (m *MsgWithOneof) GetSalary() int64 { - if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok { - return x.Salary - } - return 0 -} - -func (m *MsgWithOneof) GetCountry() string { - if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { - return x.Country - } - return "" -} - -func (m *MsgWithOneof) GetHomeAddress() string { - if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok { - return x.HomeAddress - } - return "" -} - -func (m *MsgWithOneof) GetMsgWithRequired() *MsgWithRequired { - if x, ok := m.GetUnion().(*MsgWithOneof_MsgWithRequired); ok { - return x.MsgWithRequired - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ - (*MsgWithOneof_Title)(nil), - (*MsgWithOneof_Salary)(nil), - (*MsgWithOneof_Country)(nil), - (*MsgWithOneof_HomeAddress)(nil), - (*MsgWithOneof_MsgWithRequired)(nil), - } -} - -func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*MsgWithOneof) - // union - switch x := m.Union.(type) { - case *MsgWithOneof_Title: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Title) - case *MsgWithOneof_Salary: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Salary)) - case *MsgWithOneof_Country: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Country) - case *MsgWithOneof_HomeAddress: - b.EncodeVarint(4<<3 | proto.WireBytes) - 
b.EncodeStringBytes(x.HomeAddress) - case *MsgWithOneof_MsgWithRequired: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.MsgWithRequired); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) - } - return nil -} - -func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*MsgWithOneof) - switch tag { - case 1: // union.title - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &MsgWithOneof_Title{x} - return true, err - case 2: // union.salary - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &MsgWithOneof_Salary{int64(x)} - return true, err - case 3: // union.Country - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &MsgWithOneof_Country{x} - return true, err - case 4: // union.home_address - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &MsgWithOneof_HomeAddress{x} - return true, err - case 5: // union.msg_with_required - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(MsgWithRequired) - err := b.DecodeMessage(msg) - m.Union = &MsgWithOneof_MsgWithRequired{msg} - return true, err - default: - return false, nil - } -} - -func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { - m := msg.(*MsgWithOneof) - // union - switch x := m.Union.(type) { - case *MsgWithOneof_Title: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Title))) - n += len(x.Title) - case *MsgWithOneof_Salary: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.Salary)) - case *MsgWithOneof_Country: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Country))) - n += len(x.Country) - case 
*MsgWithOneof_HomeAddress: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.HomeAddress))) - n += len(x.HomeAddress) - case *MsgWithOneof_MsgWithRequired: - s := proto.Size(x.MsgWithRequired) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Real struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Real) Reset() { *m = Real{} } -func (m *Real) String() string { return proto.CompactTextString(m) } -func (*Real) ProtoMessage() {} -func (*Real) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{6} -} - -var extRange_Real = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*Real) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_Real -} -func (m *Real) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Real.Unmarshal(m, b) -} -func (m *Real) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Real.Marshal(b, m, deterministic) -} -func (dst *Real) XXX_Merge(src proto.Message) { - xxx_messageInfo_Real.Merge(dst, src) -} -func (m *Real) XXX_Size() int { - return xxx_messageInfo_Real.Size(m) -} -func (m *Real) XXX_DiscardUnknown() { - xxx_messageInfo_Real.DiscardUnknown(m) -} - -var xxx_messageInfo_Real proto.InternalMessageInfo - -func (m *Real) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Complex struct { - Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*Complex) Reset() { *m = Complex{} } -func (m *Complex) String() string { return proto.CompactTextString(m) } -func (*Complex) ProtoMessage() {} -func (*Complex) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{7} -} - -var extRange_Complex = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_Complex -} -func (m *Complex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Complex.Unmarshal(m, b) -} -func (m *Complex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Complex.Marshal(b, m, deterministic) -} -func (dst *Complex) XXX_Merge(src proto.Message) { - xxx_messageInfo_Complex.Merge(dst, src) -} -func (m *Complex) XXX_Size() int { - return xxx_messageInfo_Complex.Size(m) -} -func (m *Complex) XXX_DiscardUnknown() { - xxx_messageInfo_Complex.DiscardUnknown(m) -} - -var xxx_messageInfo_Complex proto.InternalMessageInfo - -func (m *Complex) GetImaginary() float64 { - if m != nil && m.Imaginary != nil { - return *m.Imaginary - } - return 0 -} - -var E_Complex_RealExtension = &proto.ExtensionDesc{ - ExtendedType: (*Real)(nil), - ExtensionType: (*Complex)(nil), - Field: 123, - Name: "jsonpb.Complex.real_extension", - Tag: "bytes,123,opt,name=real_extension,json=realExtension", - Filename: "test_objects.proto", -} - -type KnownTypes struct { - An *any.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` - Dur *duration.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` - St *_struct.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` - Ts *timestamp.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` - Lv *_struct.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` - Val *_struct.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` - Dbl *wrappers.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` - Flt 
*wrappers.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` - I64 *wrappers.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` - U64 *wrappers.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` - I32 *wrappers.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` - U32 *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` - Bool *wrappers.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` - Str *wrappers.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` - Bytes *wrappers.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KnownTypes) Reset() { *m = KnownTypes{} } -func (m *KnownTypes) String() string { return proto.CompactTextString(m) } -func (*KnownTypes) ProtoMessage() {} -func (*KnownTypes) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{8} -} -func (m *KnownTypes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KnownTypes.Unmarshal(m, b) -} -func (m *KnownTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KnownTypes.Marshal(b, m, deterministic) -} -func (dst *KnownTypes) XXX_Merge(src proto.Message) { - xxx_messageInfo_KnownTypes.Merge(dst, src) -} -func (m *KnownTypes) XXX_Size() int { - return xxx_messageInfo_KnownTypes.Size(m) -} -func (m *KnownTypes) XXX_DiscardUnknown() { - xxx_messageInfo_KnownTypes.DiscardUnknown(m) -} - -var xxx_messageInfo_KnownTypes proto.InternalMessageInfo - -func (m *KnownTypes) GetAn() *any.Any { - if m != nil { - return m.An - } - return nil -} - -func (m *KnownTypes) GetDur() *duration.Duration { - if m != nil { - return m.Dur - } - return nil -} - -func (m *KnownTypes) GetSt() *_struct.Struct { - if m != nil { - return m.St - } - return nil -} - -func (m *KnownTypes) GetTs() 
*timestamp.Timestamp { - if m != nil { - return m.Ts - } - return nil -} - -func (m *KnownTypes) GetLv() *_struct.ListValue { - if m != nil { - return m.Lv - } - return nil -} - -func (m *KnownTypes) GetVal() *_struct.Value { - if m != nil { - return m.Val - } - return nil -} - -func (m *KnownTypes) GetDbl() *wrappers.DoubleValue { - if m != nil { - return m.Dbl - } - return nil -} - -func (m *KnownTypes) GetFlt() *wrappers.FloatValue { - if m != nil { - return m.Flt - } - return nil -} - -func (m *KnownTypes) GetI64() *wrappers.Int64Value { - if m != nil { - return m.I64 - } - return nil -} - -func (m *KnownTypes) GetU64() *wrappers.UInt64Value { - if m != nil { - return m.U64 - } - return nil -} - -func (m *KnownTypes) GetI32() *wrappers.Int32Value { - if m != nil { - return m.I32 - } - return nil -} - -func (m *KnownTypes) GetU32() *wrappers.UInt32Value { - if m != nil { - return m.U32 - } - return nil -} - -func (m *KnownTypes) GetBool() *wrappers.BoolValue { - if m != nil { - return m.Bool - } - return nil -} - -func (m *KnownTypes) GetStr() *wrappers.StringValue { - if m != nil { - return m.Str - } - return nil -} - -func (m *KnownTypes) GetBytes() *wrappers.BytesValue { - if m != nil { - return m.Bytes - } - return nil -} - -// Test messages for marshaling/unmarshaling required fields. 
-type MsgWithRequired struct { - Str *string `protobuf:"bytes,1,req,name=str" json:"str,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MsgWithRequired) Reset() { *m = MsgWithRequired{} } -func (m *MsgWithRequired) String() string { return proto.CompactTextString(m) } -func (*MsgWithRequired) ProtoMessage() {} -func (*MsgWithRequired) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{9} -} -func (m *MsgWithRequired) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MsgWithRequired.Unmarshal(m, b) -} -func (m *MsgWithRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MsgWithRequired.Marshal(b, m, deterministic) -} -func (dst *MsgWithRequired) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithRequired.Merge(dst, src) -} -func (m *MsgWithRequired) XXX_Size() int { - return xxx_messageInfo_MsgWithRequired.Size(m) -} -func (m *MsgWithRequired) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithRequired.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithRequired proto.InternalMessageInfo - -func (m *MsgWithRequired) GetStr() string { - if m != nil && m.Str != nil { - return *m.Str - } - return "" -} - -type MsgWithIndirectRequired struct { - Subm *MsgWithRequired `protobuf:"bytes,1,opt,name=subm" json:"subm,omitempty"` - MapField map[string]*MsgWithRequired `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - SliceField []*MsgWithRequired `protobuf:"bytes,3,rep,name=slice_field,json=sliceField" json:"slice_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MsgWithIndirectRequired) Reset() { *m = MsgWithIndirectRequired{} } -func (m *MsgWithIndirectRequired) String() string { return 
proto.CompactTextString(m) } -func (*MsgWithIndirectRequired) ProtoMessage() {} -func (*MsgWithIndirectRequired) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{10} -} -func (m *MsgWithIndirectRequired) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MsgWithIndirectRequired.Unmarshal(m, b) -} -func (m *MsgWithIndirectRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MsgWithIndirectRequired.Marshal(b, m, deterministic) -} -func (dst *MsgWithIndirectRequired) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithIndirectRequired.Merge(dst, src) -} -func (m *MsgWithIndirectRequired) XXX_Size() int { - return xxx_messageInfo_MsgWithIndirectRequired.Size(m) -} -func (m *MsgWithIndirectRequired) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithIndirectRequired.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithIndirectRequired proto.InternalMessageInfo - -func (m *MsgWithIndirectRequired) GetSubm() *MsgWithRequired { - if m != nil { - return m.Subm - } - return nil -} - -func (m *MsgWithIndirectRequired) GetMapField() map[string]*MsgWithRequired { - if m != nil { - return m.MapField - } - return nil -} - -func (m *MsgWithIndirectRequired) GetSliceField() []*MsgWithRequired { - if m != nil { - return m.SliceField - } - return nil -} - -type MsgWithRequiredBytes struct { - Byts []byte `protobuf:"bytes,1,req,name=byts" json:"byts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MsgWithRequiredBytes) Reset() { *m = MsgWithRequiredBytes{} } -func (m *MsgWithRequiredBytes) String() string { return proto.CompactTextString(m) } -func (*MsgWithRequiredBytes) ProtoMessage() {} -func (*MsgWithRequiredBytes) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{11} -} -func (m *MsgWithRequiredBytes) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_MsgWithRequiredBytes.Unmarshal(m, b) -} -func (m *MsgWithRequiredBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MsgWithRequiredBytes.Marshal(b, m, deterministic) -} -func (dst *MsgWithRequiredBytes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithRequiredBytes.Merge(dst, src) -} -func (m *MsgWithRequiredBytes) XXX_Size() int { - return xxx_messageInfo_MsgWithRequiredBytes.Size(m) -} -func (m *MsgWithRequiredBytes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithRequiredBytes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithRequiredBytes proto.InternalMessageInfo - -func (m *MsgWithRequiredBytes) GetByts() []byte { - if m != nil { - return m.Byts - } - return nil -} - -type MsgWithRequiredWKT struct { - Str *wrappers.StringValue `protobuf:"bytes,1,req,name=str" json:"str,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MsgWithRequiredWKT) Reset() { *m = MsgWithRequiredWKT{} } -func (m *MsgWithRequiredWKT) String() string { return proto.CompactTextString(m) } -func (*MsgWithRequiredWKT) ProtoMessage() {} -func (*MsgWithRequiredWKT) Descriptor() ([]byte, []int) { - return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{12} -} -func (m *MsgWithRequiredWKT) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MsgWithRequiredWKT.Unmarshal(m, b) -} -func (m *MsgWithRequiredWKT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MsgWithRequiredWKT.Marshal(b, m, deterministic) -} -func (dst *MsgWithRequiredWKT) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithRequiredWKT.Merge(dst, src) -} -func (m *MsgWithRequiredWKT) XXX_Size() int { - return xxx_messageInfo_MsgWithRequiredWKT.Size(m) -} -func (m *MsgWithRequiredWKT) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithRequiredWKT.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithRequiredWKT proto.InternalMessageInfo - 
-func (m *MsgWithRequiredWKT) GetStr() *wrappers.StringValue { - if m != nil { - return m.Str - } - return nil -} - -var E_Name = &proto.ExtensionDesc{ - ExtendedType: (*Real)(nil), - ExtensionType: (*string)(nil), - Field: 124, - Name: "jsonpb.name", - Tag: "bytes,124,opt,name=name", - Filename: "test_objects.proto", -} - -var E_Extm = &proto.ExtensionDesc{ - ExtendedType: (*Real)(nil), - ExtensionType: (*MsgWithRequired)(nil), - Field: 125, - Name: "jsonpb.extm", - Tag: "bytes,125,opt,name=extm", - Filename: "test_objects.proto", -} - -func init() { - proto.RegisterType((*Simple)(nil), "jsonpb.Simple") - proto.RegisterType((*NonFinites)(nil), "jsonpb.NonFinites") - proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") - proto.RegisterType((*Widget)(nil), "jsonpb.Widget") - proto.RegisterType((*Maps)(nil), "jsonpb.Maps") - proto.RegisterMapType((map[bool]*Simple)(nil), "jsonpb.Maps.MBoolSimpleEntry") - proto.RegisterMapType((map[int64]string)(nil), "jsonpb.Maps.MInt64StrEntry") - proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") - proto.RegisterType((*Real)(nil), "jsonpb.Real") - proto.RegisterType((*Complex)(nil), "jsonpb.Complex") - proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") - proto.RegisterType((*MsgWithRequired)(nil), "jsonpb.MsgWithRequired") - proto.RegisterType((*MsgWithIndirectRequired)(nil), "jsonpb.MsgWithIndirectRequired") - proto.RegisterMapType((map[string]*MsgWithRequired)(nil), "jsonpb.MsgWithIndirectRequired.MapFieldEntry") - proto.RegisterType((*MsgWithRequiredBytes)(nil), "jsonpb.MsgWithRequiredBytes") - proto.RegisterType((*MsgWithRequiredWKT)(nil), "jsonpb.MsgWithRequiredWKT") - proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) - proto.RegisterExtension(E_Complex_RealExtension) - proto.RegisterExtension(E_Name) - proto.RegisterExtension(E_Extm) -} - -func init() { proto.RegisterFile("test_objects.proto", fileDescriptor_test_objects_a4d3e593ea3c686f) } - -var 
fileDescriptor_test_objects_a4d3e593ea3c686f = []byte{ - // 1460 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xdd, 0x72, 0xdb, 0x44, - 0x14, 0x8e, 0x24, 0xcb, 0xb6, 0x8e, 0xf3, 0xd7, 0x6d, 0xda, 0x2a, 0xa1, 0x14, 0x8d, 0x5b, 0x8a, - 0x69, 0x89, 0x3b, 0x38, 0x1e, 0x4f, 0x29, 0xdc, 0x34, 0x4d, 0x4a, 0x4b, 0xdb, 0xc0, 0x6c, 0x52, - 0x7a, 0xe9, 0x91, 0x23, 0x39, 0x55, 0x91, 0xb4, 0x66, 0x77, 0x9d, 0xd4, 0x03, 0xcc, 0xe4, 0x19, - 0x18, 0x9e, 0x80, 0x0b, 0x6e, 0xb9, 0xe3, 0x82, 0xb7, 0xe0, 0x8d, 0x98, 0x3d, 0xbb, 0xf2, 0x5f, - 0xe2, 0x81, 0x2b, 0x7b, 0xf7, 0xfb, 0xd9, 0xd5, 0x9e, 0x4f, 0x67, 0x05, 0x44, 0xc6, 0x42, 0x76, - 0x59, 0xef, 0x5d, 0x7c, 0x2c, 0x45, 0x73, 0xc0, 0x99, 0x64, 0xa4, 0xfc, 0x4e, 0xb0, 0x7c, 0xd0, - 0xdb, 0xda, 0x3c, 0x61, 0xec, 0x24, 0x8d, 0x1f, 0xe0, 0x6c, 0x6f, 0xd8, 0x7f, 0x10, 0xe6, 0x23, - 0x4d, 0xd9, 0xba, 0x35, 0x0f, 0x45, 0x43, 0x1e, 0xca, 0x84, 0xe5, 0x06, 0xbf, 0x39, 0x8f, 0x0b, - 0xc9, 0x87, 0xc7, 0xd2, 0xa0, 0x1f, 0xcd, 0xa3, 0x32, 0xc9, 0x62, 0x21, 0xc3, 0x6c, 0xb0, 0xc8, - 0xfe, 0x8c, 0x87, 0x83, 0x41, 0xcc, 0xcd, 0x0e, 0xeb, 0x7f, 0x96, 0xa0, 0x7c, 0x98, 0x64, 0x83, - 0x34, 0x26, 0xd7, 0xa0, 0xcc, 0xba, 0x3d, 0xc6, 0x52, 0xdf, 0x0a, 0xac, 0x46, 0x95, 0xba, 0x6c, - 0x97, 0xb1, 0x94, 0xdc, 0x80, 0x0a, 0xeb, 0x26, 0xb9, 0xdc, 0x69, 0xf9, 0x76, 0x60, 0x35, 0x5c, - 0x5a, 0x66, 0xcf, 0xd5, 0x88, 0xdc, 0x82, 0x9a, 0x01, 0xba, 0x42, 0x72, 0xdf, 0x41, 0xd0, 0xd3, - 0xe0, 0xa1, 0xe4, 0x63, 0x61, 0xa7, 0xed, 0x97, 0x02, 0xab, 0xe1, 0x68, 0x61, 0xa7, 0x3d, 0x16, - 0x76, 0xda, 0x28, 0x74, 0x11, 0xf4, 0x34, 0xa8, 0x84, 0x9b, 0x50, 0x65, 0xdd, 0xa1, 0x5e, 0xb2, - 0x1c, 0x58, 0x8d, 0x15, 0x5a, 0x61, 0xaf, 0x71, 0x48, 0x02, 0x58, 0x2e, 0x20, 0xd4, 0x56, 0x10, - 0x06, 0x03, 0xcf, 0x88, 0x3b, 0x6d, 0xbf, 0x1a, 0x58, 0x8d, 0x92, 0x11, 0x77, 0xda, 0x13, 0xb1, - 0x59, 0xd8, 0x43, 0x18, 0x0c, 0x3c, 0x16, 0x0b, 0xbd, 0x32, 0x04, 0x56, 0xe3, 0x0a, 0xad, 0xb0, - 0xc3, 0xa9, 0x95, 0xc5, 0x64, 
0xe5, 0x1a, 0xc2, 0x60, 0xe0, 0x19, 0x71, 0xa7, 0xed, 0x2f, 0x07, - 0x56, 0x83, 0x18, 0x71, 0xb1, 0xb2, 0x98, 0xac, 0xbc, 0x82, 0x30, 0x18, 0x78, 0x7c, 0x58, 0xfd, - 0x94, 0x85, 0xd2, 0x5f, 0x0d, 0xac, 0x86, 0x4d, 0xcb, 0xec, 0xa9, 0x1a, 0xe9, 0xc3, 0x42, 0x00, - 0x95, 0x6b, 0x08, 0x7a, 0x1a, 0x1c, 0xaf, 0x1a, 0xb1, 0x61, 0x2f, 0x8d, 0xfd, 0xf5, 0xc0, 0x6a, - 0x58, 0xb4, 0xc2, 0xf6, 0x70, 0xa8, 0x57, 0xd5, 0x10, 0x6a, 0xaf, 0x20, 0x0c, 0x06, 0x9e, 0x6c, - 0x59, 0xf2, 0x24, 0x3f, 0xf1, 0x49, 0x60, 0x35, 0x3c, 0xb5, 0x65, 0x1c, 0xea, 0x0d, 0xf5, 0x46, - 0x32, 0x16, 0xfe, 0xd5, 0xc0, 0x6a, 0x2c, 0xd3, 0x32, 0xdb, 0x55, 0xa3, 0xfa, 0xaf, 0x16, 0xc0, - 0x01, 0xcb, 0x9f, 0x26, 0x79, 0x22, 0x63, 0x41, 0xae, 0x82, 0xdb, 0xef, 0xe6, 0x61, 0x8e, 0xa1, - 0xb1, 0x69, 0xa9, 0x7f, 0x10, 0xe6, 0x2a, 0x4a, 0xfd, 0xee, 0x20, 0xc9, 0xfb, 0x18, 0x19, 0x9b, - 0xba, 0xfd, 0xef, 0x92, 0xbc, 0xaf, 0xa7, 0x73, 0x35, 0xed, 0x98, 0xe9, 0x03, 0x35, 0x7d, 0x15, - 0xdc, 0x08, 0x2d, 0x4a, 0xb8, 0xc1, 0x52, 0x64, 0x2c, 0x22, 0x6d, 0xe1, 0xe2, 0xac, 0x1b, 0x15, - 0x16, 0x91, 0xb6, 0x28, 0x9b, 0x69, 0x65, 0x51, 0xff, 0xc3, 0x86, 0x0a, 0x8d, 0x07, 0x71, 0x28, - 0x85, 0xa2, 0xf0, 0x22, 0xc7, 0x8e, 0xca, 0x31, 0x2f, 0x72, 0xcc, 0xc7, 0x39, 0x76, 0x54, 0x8e, - 0xb9, 0xce, 0x71, 0x01, 0x74, 0xda, 0xbe, 0x13, 0x38, 0x2a, 0xa7, 0x5c, 0xe7, 0x74, 0x13, 0xaa, - 0xbc, 0xc8, 0x61, 0x29, 0x70, 0x54, 0x0e, 0xb9, 0xc9, 0xe1, 0x18, 0xea, 0xb4, 0x7d, 0x37, 0x70, - 0x54, 0xca, 0xb8, 0x49, 0x19, 0x42, 0xa2, 0x48, 0xaf, 0xa3, 0x32, 0xc4, 0x0f, 0xa7, 0x54, 0x26, - 0x21, 0x95, 0xc0, 0x51, 0x09, 0xe1, 0x26, 0x21, 0xb8, 0x09, 0x5d, 0xff, 0x6a, 0xe0, 0xa8, 0xfa, - 0x73, 0x5d, 0x7f, 0xd4, 0x98, 0xfa, 0x7a, 0x81, 0xa3, 0xea, 0xcb, 0x4d, 0x7d, 0xb5, 0x9d, 0xae, - 0x1e, 0x04, 0x8e, 0xaa, 0x1e, 0x9f, 0x54, 0x8f, 0x9b, 0xea, 0xd5, 0x02, 0x47, 0x55, 0x8f, 0xeb, - 0xea, 0xfd, 0x65, 0x43, 0xf9, 0x4d, 0x12, 0x9d, 0xc4, 0x92, 0xdc, 0x03, 0xf7, 0x98, 0xa5, 0x8c, - 0x63, 0xe5, 0x56, 0x5b, 0x1b, 0x4d, 0xdd, 0xac, 0x9a, 0x1a, 0x6e, 
0x3e, 0x51, 0x18, 0xd5, 0x14, - 0xb2, 0xad, 0xfc, 0x34, 0x5b, 0x1d, 0xde, 0x22, 0x76, 0x99, 0xe3, 0x2f, 0xb9, 0x0b, 0x65, 0x81, - 0x4d, 0x05, 0xdf, 0xa2, 0x5a, 0x6b, 0xb5, 0x60, 0xeb, 0x56, 0x43, 0x0d, 0x4a, 0x3e, 0xd5, 0x07, - 0x82, 0x4c, 0xb5, 0xcf, 0x8b, 0x4c, 0x75, 0x40, 0x86, 0x5a, 0xe1, 0xba, 0xc0, 0xfe, 0x06, 0x7a, - 0xae, 0x15, 0x4c, 0x53, 0x77, 0x5a, 0xe0, 0xe4, 0x33, 0xf0, 0x78, 0xb7, 0x20, 0x5f, 0x43, 0xdb, - 0x0b, 0xe4, 0x2a, 0x37, 0xff, 0xea, 0x1f, 0x83, 0xab, 0x37, 0x5d, 0x01, 0x87, 0xee, 0xef, 0xad, - 0x2f, 0x11, 0x0f, 0xdc, 0xaf, 0xe9, 0xfe, 0xfe, 0xc1, 0xba, 0x45, 0xaa, 0x50, 0xda, 0x7d, 0xf9, - 0x7a, 0x7f, 0xdd, 0xae, 0xff, 0x66, 0x43, 0xe9, 0x55, 0x38, 0x10, 0xe4, 0x4b, 0xa8, 0x65, 0x53, - 0xdd, 0xcb, 0x42, 0xff, 0x0f, 0x0a, 0x7f, 0x45, 0x69, 0xbe, 0x2a, 0x5a, 0xd9, 0x7e, 0x2e, 0xf9, - 0x88, 0x7a, 0xd9, 0xb8, 0xb5, 0x3d, 0x86, 0x95, 0x0c, 0xb3, 0x59, 0x3c, 0xb5, 0x8d, 0xf2, 0x0f, - 0x67, 0xe5, 0x2a, 0xaf, 0xfa, 0xb1, 0xb5, 0x41, 0x2d, 0x9b, 0xcc, 0x6c, 0x7d, 0x05, 0xab, 0xb3, - 0xfe, 0x64, 0x1d, 0x9c, 0x1f, 0xe2, 0x11, 0x96, 0xd1, 0xa1, 0xea, 0x2f, 0xd9, 0x00, 0xf7, 0x34, - 0x4c, 0x87, 0x31, 0xbe, 0x7e, 0x1e, 0xd5, 0x83, 0x47, 0xf6, 0x43, 0x6b, 0xeb, 0x00, 0xd6, 0xe7, - 0xed, 0xa7, 0xf5, 0x55, 0xad, 0xbf, 0x33, 0xad, 0xbf, 0x58, 0x94, 0x89, 0x5f, 0xfd, 0x1f, 0x0b, - 0x96, 0x5f, 0x89, 0x93, 0x37, 0x89, 0x7c, 0xfb, 0x6d, 0x1e, 0xb3, 0x3e, 0xb9, 0x0e, 0xae, 0x4c, - 0x64, 0x1a, 0xa3, 0x9d, 0xf7, 0x6c, 0x89, 0xea, 0x21, 0xf1, 0xa1, 0x2c, 0xc2, 0x34, 0xe4, 0x23, - 0xf4, 0x74, 0x9e, 0x2d, 0x51, 0x33, 0x26, 0x5b, 0x50, 0x79, 0xc2, 0x86, 0x6a, 0x27, 0xd8, 0x16, - 0x94, 0xa6, 0x98, 0x20, 0xb7, 0x61, 0xf9, 0x2d, 0xcb, 0xe2, 0x6e, 0x18, 0x45, 0x3c, 0x16, 0x02, - 0x3b, 0x84, 0x22, 0xd4, 0xd4, 0xec, 0x63, 0x3d, 0x49, 0xf6, 0xe1, 0x4a, 0x26, 0x4e, 0xba, 0x67, - 0x89, 0x7c, 0xdb, 0xe5, 0xf1, 0x8f, 0xc3, 0x84, 0xc7, 0x11, 0x76, 0x8d, 0x5a, 0xeb, 0xc6, 0xf8, - 0x60, 0xf5, 0x1e, 0xa9, 0x81, 0x9f, 0x2d, 0xd1, 0xb5, 0x6c, 0x76, 0x6a, 0xb7, 0x02, 0xee, 0x30, - 0x4f, 
0x58, 0x5e, 0xbf, 0x0b, 0x25, 0x1a, 0x87, 0xe9, 0xe4, 0x14, 0x2d, 0xdd, 0x6a, 0x70, 0x70, - 0xaf, 0x5a, 0x8d, 0xd6, 0xcf, 0xcf, 0xcf, 0xcf, 0xed, 0xfa, 0x99, 0xda, 0xb8, 0x3a, 0x90, 0xf7, - 0xe4, 0x26, 0x78, 0x49, 0x16, 0x9e, 0x24, 0xb9, 0x7a, 0x40, 0x4d, 0x9f, 0x4c, 0x4c, 0x24, 0xad, - 0x3d, 0x58, 0xe5, 0x71, 0x98, 0x76, 0xe3, 0xf7, 0x32, 0xce, 0x45, 0xc2, 0x72, 0xb2, 0x3c, 0x49, - 0x66, 0x98, 0xfa, 0x3f, 0xcd, 0x46, 0xdb, 0xd8, 0xd3, 0x15, 0x25, 0xda, 0x2f, 0x34, 0xf5, 0xbf, - 0x5d, 0x80, 0x17, 0x39, 0x3b, 0xcb, 0x8f, 0x46, 0x83, 0x58, 0x90, 0x3b, 0x60, 0x87, 0x39, 0x5e, - 0x1b, 0xb5, 0xd6, 0x46, 0x53, 0x5f, 0xf8, 0xcd, 0xe2, 0xc2, 0x6f, 0x3e, 0xce, 0x47, 0xd4, 0x0e, - 0x73, 0x72, 0x1f, 0x9c, 0x68, 0xa8, 0x5f, 0xf6, 0x5a, 0x6b, 0xf3, 0x02, 0x6d, 0xcf, 0x7c, 0x76, - 0x50, 0xc5, 0x22, 0x9f, 0x80, 0x2d, 0x24, 0xde, 0x62, 0xea, 0x0c, 0xe7, 0xb9, 0x87, 0xf8, 0x09, - 0x42, 0x6d, 0xa1, 0x9a, 0x88, 0x2d, 0x85, 0x89, 0xc9, 0xd6, 0x05, 0xe2, 0x51, 0xf1, 0x35, 0x42, - 0x6d, 0x29, 0x14, 0x37, 0x3d, 0xc5, 0x1b, 0xec, 0x32, 0xee, 0xcb, 0x44, 0xc8, 0xef, 0xd5, 0x09, - 0x53, 0x3b, 0x3d, 0x25, 0x0d, 0x70, 0x4e, 0xc3, 0x14, 0x6f, 0xb4, 0x5a, 0xeb, 0xfa, 0x05, 0xb2, - 0x26, 0x2a, 0x0a, 0x69, 0x82, 0x13, 0xf5, 0x52, 0x8c, 0x4e, 0xad, 0x75, 0xf3, 0xe2, 0x73, 0x61, - 0xaf, 0x34, 0xfc, 0xa8, 0x97, 0x92, 0x6d, 0x70, 0xfa, 0xa9, 0xc4, 0x24, 0xa9, 0xf7, 0x76, 0x9e, - 0x8f, 0x5d, 0xd7, 0xd0, 0xfb, 0xa9, 0x54, 0xf4, 0x04, 0x9b, 0xfc, 0xe5, 0x74, 0x7c, 0x13, 0x0d, - 0x3d, 0xe9, 0xb4, 0xd5, 0x6e, 0x86, 0x9d, 0x36, 0x5e, 0x4e, 0x97, 0xed, 0xe6, 0xf5, 0x34, 0x7f, - 0xd8, 0x69, 0xa3, 0xfd, 0x4e, 0x0b, 0xbf, 0x63, 0x16, 0xd8, 0xef, 0xb4, 0x0a, 0xfb, 0x9d, 0x16, - 0xda, 0xef, 0xb4, 0xf0, 0xc3, 0x66, 0x91, 0xfd, 0x98, 0x3f, 0x44, 0x7e, 0x09, 0x6f, 0x42, 0x6f, - 0xc1, 0xa1, 0xab, 0x56, 0xa0, 0xe9, 0xc8, 0x53, 0xfe, 0xaa, 0xa9, 0xc1, 0x02, 0x7f, 0x7d, 0xbb, - 0x18, 0x7f, 0x21, 0x39, 0xf9, 0x1c, 0xdc, 0xe2, 0x96, 0xb9, 0xfc, 0x01, 0xf0, 0xd6, 0xd1, 0x02, - 0xcd, 0xac, 0xdf, 0x86, 0xb5, 0xb9, 0x97, 
0x51, 0x35, 0x20, 0xdd, 0x4a, 0xed, 0x86, 0x87, 0xbe, - 0xf5, 0xdf, 0x6d, 0xb8, 0x61, 0x58, 0xcf, 0xf3, 0x28, 0xe1, 0xf1, 0xb1, 0x1c, 0xb3, 0xef, 0x43, - 0x49, 0x0c, 0x7b, 0x99, 0x49, 0xf2, 0xa2, 0x37, 0x9c, 0x22, 0x89, 0x7c, 0x03, 0x5e, 0x16, 0x0e, - 0xba, 0xfd, 0x24, 0x4e, 0x23, 0xd3, 0x6c, 0xb7, 0xe7, 0x14, 0xf3, 0x0b, 0xa8, 0x26, 0xfc, 0x54, - 0xf1, 0x75, 0xf3, 0xad, 0x66, 0x66, 0x48, 0x1e, 0x42, 0x4d, 0xa4, 0xc9, 0x71, 0x6c, 0xdc, 0x1c, - 0x74, 0x5b, 0xb8, 0x3e, 0x20, 0x17, 0x95, 0x5b, 0x47, 0xb0, 0x32, 0x63, 0x3a, 0xdd, 0x72, 0x3d, - 0xdd, 0x72, 0xb7, 0x67, 0x5b, 0xee, 0x42, 0xdb, 0xa9, 0xde, 0x7b, 0x0f, 0x36, 0xe6, 0x50, 0x3c, - 0x6d, 0x42, 0xa0, 0xd4, 0x1b, 0x49, 0x81, 0xe7, 0xb9, 0x4c, 0xf1, 0x7f, 0x7d, 0x0f, 0xc8, 0x1c, - 0xf7, 0xcd, 0x8b, 0xa3, 0xa2, 0xdc, 0x8a, 0xf8, 0x7f, 0xca, 0xfd, 0x28, 0x80, 0x52, 0x1e, 0x66, - 0xf1, 0x5c, 0xd3, 0xfa, 0x19, 0x9f, 0x02, 0x91, 0x47, 0x5f, 0x40, 0x29, 0x7e, 0x2f, 0xb3, 0x39, - 0xc6, 0x2f, 0xff, 0x51, 0x2a, 0x25, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0xe9, 0xd4, 0xfd, 0x2f, - 0x41, 0x0d, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto deleted file mode 100644 index e01386e7..00000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto +++ /dev/null @@ -1,179 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -package jsonpb; - -// Test message for holding primitive types. 
-message Simple { - optional bool o_bool = 1; - optional int32 o_int32 = 2; - optional int32 o_int32_str = 3; - optional int64 o_int64 = 4; - optional int64 o_int64_str = 5; - optional uint32 o_uint32 = 6; - optional uint32 o_uint32_str = 7; - optional uint64 o_uint64 = 8; - optional uint64 o_uint64_str = 9; - optional sint32 o_sint32 = 10; - optional sint32 o_sint32_str = 11; - optional sint64 o_sint64 = 12; - optional sint64 o_sint64_str = 13; - optional float o_float = 14; - optional float o_float_str = 15; - optional double o_double = 16; - optional double o_double_str = 17; - optional string o_string = 18; - optional bytes o_bytes = 19; -} - -// Test message for holding special non-finites primitives. -message NonFinites { - optional float f_nan = 1; - optional float f_pinf = 2; - optional float f_ninf = 3; - optional double d_nan = 4; - optional double d_pinf = 5; - optional double d_ninf = 6; -} - -// Test message for holding repeated primitives. -message Repeats { - repeated bool r_bool = 1; - repeated int32 r_int32 = 2; - repeated int64 r_int64 = 3; - repeated uint32 r_uint32 = 4; - repeated uint64 r_uint64 = 5; - repeated sint32 r_sint32 = 6; - repeated sint64 r_sint64 = 7; - repeated float r_float = 8; - repeated double r_double = 9; - repeated string r_string = 10; - repeated bytes r_bytes = 11; -} - -// Test message for holding enums and nested messages. 
-message Widget { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color color = 1; - repeated Color r_color = 2; - - optional Simple simple = 10; - repeated Simple r_simple = 11; - - optional Repeats repeats = 20; - repeated Repeats r_repeats = 21; -} - -message Maps { - map m_int64_str = 1; - map m_bool_simple = 2; -} - -message MsgWithOneof { - oneof union { - string title = 1; - int64 salary = 2; - string Country = 3; - string home_address = 4; - MsgWithRequired msg_with_required = 5; - } -} - -message Real { - optional double value = 1; - extensions 100 to max; -} - -extend Real { - optional string name = 124; -} - -message Complex { - extend Real { - optional Complex real_extension = 123; - } - optional double imaginary = 1; - extensions 100 to max; -} - -message KnownTypes { - optional google.protobuf.Any an = 14; - optional google.protobuf.Duration dur = 1; - optional google.protobuf.Struct st = 12; - optional google.protobuf.Timestamp ts = 2; - optional google.protobuf.ListValue lv = 15; - optional google.protobuf.Value val = 16; - - optional google.protobuf.DoubleValue dbl = 3; - optional google.protobuf.FloatValue flt = 4; - optional google.protobuf.Int64Value i64 = 5; - optional google.protobuf.UInt64Value u64 = 6; - optional google.protobuf.Int32Value i32 = 7; - optional google.protobuf.UInt32Value u32 = 8; - optional google.protobuf.BoolValue bool = 9; - optional google.protobuf.StringValue str = 10; - optional google.protobuf.BytesValue bytes = 11; -} - -// Test messages for marshaling/unmarshaling required fields. 
-message MsgWithRequired { - required string str = 1; -} - -message MsgWithIndirectRequired { - optional MsgWithRequired subm = 1; - map map_field = 2; - repeated MsgWithRequired slice_field = 3; -} - -message MsgWithRequiredBytes { - required bytes byts = 1; -} - -message MsgWithRequiredWKT { - required google.protobuf.StringValue str = 1; -} - -extend Real { - optional MsgWithRequired extm = 125; -} diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go deleted file mode 100644 index 1bea4b6e..00000000 --- a/vendor/github.com/golang/protobuf/proto/all_test.go +++ /dev/null @@ -1,2492 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "sync" - "testing" - "time" - - . "github.com/golang/protobuf/proto" - pb3 "github.com/golang/protobuf/proto/proto3_proto" - . "github.com/golang/protobuf/proto/test_proto" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - pb.F_Sfixed32Defaulted = Int32(Default_GoTest_F_Sfixed32Defaulted) - pb.F_Sfixed64Defaulted = Int64(Default_GoTest_F_Sfixed64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = 
Int64(-64) - pb.F_Sfixed32Required = Int32(-32) - pb.F_Sfixed64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) 
-func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. 
-type fakeMarshaler struct { - b []byte - err error -} - -func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } -func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } -func (f *fakeMarshaler) ProtoMessage() {} -func (f *fakeMarshaler) Reset() {} - -type msgWithFakeMarshaler struct { - M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` -} - -func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } -func (m *msgWithFakeMarshaler) ProtoMessage() {} -func (m *msgWithFakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - errType reflect.Type - }{ - { - name: "Marshaler that fails", - m: &fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since the Marshal method returned bytes, they should be written to the - // buffer. (For efficiency, we assume that Marshal implementations are - // always correct w.r.t. RequiredNotSetError and output.) - want: []byte{5, 6, 7}, - errType: reflect.TypeOf(errors.New("some marshal err")), - }, - { - name: "Marshaler that fails with RequiredNotSetError", - m: &msgWithFakeMarshaler{ - M: &fakeMarshaler{ - err: &RequiredNotSetError{}, - b: []byte{5, 6, 7}, - }, - }, - // Since there's an error that can be continued after, - // the buffer should be written. 
- want: []byte{ - 10, 3, // for &msgWithFakeMarshaler - 5, 6, 7, // for &fakeMarshaler - }, - errType: reflect.TypeOf(&RequiredNotSetError{}), - }, - { - name: "Marshaler that succeeds", - m: &fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if reflect.TypeOf(err) != test.errType { - t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - if size := Size(test.m); size != len(b.Bytes()) { - t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) - } - - m, mErr := Marshal(test.m) - if !bytes.Equal(b.Bytes(), m) { - t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) - } - if !reflect.DeepEqual(err, mErr) { - t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", - test.name, fmt.Sprint(mErr), fmt.Sprint(err)) - } - } -} - -// Ensure that Buffer.Marshal uses O(N) memory for N messages -func TestBufferMarshalAllocs(t *testing.T) { - value := &OtherMessage{Key: Int64(1)} - msg := &MyMessage{Count: Int32(1), Others: []*OtherMessage{value}} - - reallocSize := func(t *testing.T, items int, prealloc int) (int64, int64) { - var b Buffer - b.SetBuf(make([]byte, 0, prealloc)) - - var allocSpace int64 - prevCap := cap(b.Bytes()) - for i := 0; i < items; i++ { - err := b.Marshal(msg) - if err != nil { - t.Errorf("Marshal err = %q", err) - break - } - if c := cap(b.Bytes()); prevCap != c { - allocSpace += int64(c) - prevCap = c - } - } - needSpace := int64(len(b.Bytes())) - return allocSpace, needSpace - } - - for _, prealloc := range []int{0, 100, 10000} { - for _, items := range []int{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000} { - runtimeSpace, need := reallocSize(t, items, prealloc) - totalSpace := 
int64(prealloc) + runtimeSpace - - runtimeRatio := float64(runtimeSpace) / float64(need) - totalRatio := float64(totalSpace) / float64(need) - - if totalRatio < 1 || runtimeRatio > 4 { - t.Errorf("needed %dB, allocated %dB total (ratio %.1f), allocated %dB at runtime (ratio %.1f)", - need, totalSpace, totalRatio, runtimeSpace, runtimeRatio) - } - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if !strings.Contains(err.Error(), "Kind") { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. 
-func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? 
-func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff") // field 105, encoding 1, -64 fixed64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f"+ // field 403, encoding 0, value 127 - "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 - "a919c0ffffffffffffff") // field 
405, encoding 1, -64 fixed64 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - pb.F_Sfixed32Defaulted = Int32(-32) - pb.F_Sfixed64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group 
field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f"+ // field 403, encoding 0, value 127 - "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 - "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) 
- pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.F_Sfixed32Optional = Int32(-32) - pb.F_Sfixed64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 
6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8513e0ffffff"+ // field 304, encoding 5, -32 fixed32 - "8913c0ffffffffffffff"+ // field 305, encoding 1, -64 fixed64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f"+ // field 403, encoding 0, value 127 - "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 - "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 - -} - -// All required fields set, defaults provided, all repeated fields given two values. 
-func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.F_Sfixed32Repeated = []int32{32, -32} - pb.F_Sfixed64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, 
value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // 
field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "e50c20000000"+ // field 204, encoding 5, 32 fixed32 - "e50ce0ffffff"+ // field 204, encoding 5, -32 fixed32 - "e90c4000000000000000"+ // field 205, encoding 1, 64 fixed64 - "e90cc0ffffffffffffff"+ // field 205, encoding 1, -64 fixed64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f"+ // field 403, encoding 0, value 127 - "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 - "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 - -} - -// All required fields set, all packed repeated fields given two values. 
-func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - pb.F_Sfixed32RepeatedPacked = []int32{32, -32} - pb.F_Sfixed64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, encoding 2, 
8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f"+ // value 64, value -64 - "c21f08"+ // field 504, encoding 2, 8 bytes - "20000000e0ffffff"+ // value 32, value -32 - "ca1f10"+ // field 505, encoding 2, 16 bytes - "4000000000000000c0ffffffffffffff") // value 64, value -64 - -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. 
-func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. 
-// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. - igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - if x := uint64(pbd.F_Sint64Repeated[i]); x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - if x := uint64(pbd.F_Sint32Repeated[i]); x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) - } - if x := uint64(pbd.F_DoubleRepeated[i]); x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - if x := uint64(pbd.F_FloatRepeated[i]); x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - if x := pbd.F_Uint64Repeated[i]; x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - if x := uint64(pbd.F_Uint32Repeated[i]); x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - if x := pbd.F_Fixed64Repeated[i]; x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - if x := uint64(pbd.F_Fixed32Repeated[i]); x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - if x := uint64(pbd.F_Int64Repeated[i]); x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - if x := uint64(pbd.F_Int32Repeated[i]); x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if x := pbd.F_BoolRepeated[i]; x != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? 
- t.Error("pbd.RepeatedField bad") - } - } -} - -func TestBadWireTypeUnknown(t *testing.T) { - var b []byte - fmt.Sscanf("0a01780d00000000080b101612036161611521000000202c220362626225370000002203636363214200000000000000584d5a036464645900000000000056405d63000000", "%x", &b) - - m := new(MyMessage) - if err := Unmarshal(b, m); err != nil { - t.Errorf("unexpected Unmarshal error: %v", err) - } - - var unknown []byte - fmt.Sscanf("0a01780d0000000010161521000000202c2537000000214200000000000000584d5a036464645d63000000", "%x", &unknown) - if !bytes.Equal(m.XXX_unrecognized, unknown) { - t.Errorf("unknown bytes mismatch:\ngot %x\nwant %x", m.XXX_unrecognized, unknown) - } - DiscardUnknown(m) - - want := &MyMessage{Count: Int32(11), Name: String("aaa"), Pet: []string{"bbb", "ccc"}, Bigfloat: Float64(88)} - if !Equal(m, want) { - t.Errorf("message mismatch:\ngot %v\nwant %v", m, want) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || 
!strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - _ = fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Type") && !strings.Contains(err.Error(), "{Unknown}") { - // TODO: remove unknown cases once we commit to the new unmarshaler. 
- t.Errorf("unmarshal: bad error type: %v", err) - } -} - -// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcementGroups(t *testing.T) { - pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} - if _, err := Marshal(pb); err == nil { - t.Error("marshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { - t.Errorf("marshal: bad error type: %v", err) - } - - buf := []byte{11, 12} - if err := Unmarshal(buf, pb); err == nil { - t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") && !strings.Contains(err.Error(), "Group.{Unknown}") { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - { - var m *GoEnum - if _, err := Marshal(m); err != ErrNil { - t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) - } - } - - { - m := &Communique{Union: &Communique_Msg{nil}} - if _, err := Marshal(m); err == nil || err == ErrNil { - t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) - } - } -} - -// A type that implements the Marshaler interface, but is not nillable. -type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -type NMMessage struct{} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. 
- nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := 
new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. - b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestBytesWithInvalidLengthInGroup(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{0xbb, 0x30, 0xb2, 0x30, 0xb0, 0xb2, 0x83, 0xf1, 0xb0, 0xb2, 0xef, 0xbf, 0xbd, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -func TestExtensionMapFieldMarshalDeterministic(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{MapField: map[int32]int32{1: 1, 2: 2, 3: 3, 4: 4}}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - marshal := func(m Message) []byte { - var b Buffer - b.SetDeterministic(true) - if err := b.Marshal(m); err != nil { - t.Fatalf("Marshal failed: %v", err) - } - return b.Bytes() - } - - want := marshal(m) - for i := 0; i < 100; i++ { - if got := marshal(m); !bytes.Equal(got, want) { - t.Errorf("Marshal produced inconsistent output with determinism enabled (pass %d).\n got %v\nwant %v", i, got, want) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestUnmarshalMergesGroups(t *testing.T) { - // If a nested group occurs twice in the input, - // the fields should be merged when decoding. 
- a := &GroupNew{ - G: &GroupNew_G{ - X: Int32(7), - Y: Int32(8), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &GroupNew{ - G: &GroupNew_G{ - X: Int32(9), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &GroupNew{ - G: &GroupNew_G{ - X: Int32(9), - Y: Int32(8), - }, - } - got := new(GroupNew) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, 
string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" + // field 103, encoding 0, 0x7f zigzag64 - "c506e0ffffff" + // field 104, encoding 5, -32 fixed32 - "c906c0ffffffffffffff" // field 105, encoding 1, -64 fixed64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if !strings.Contains(err.Error(), "RequiredField.Label") { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if !strings.Contains(err.Error(), "RequiredField.Label") && !strings.Contains(err.Error(), "RequiredField.{Unknown}") { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if !strings.Contains(err.Error(), "RequiredField.Label") { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func TestRequiredNotSetErrorWithBadWireTypes(t *testing.T) { - // Required field expects a varint, and properly found a varint. 
- if err := Unmarshal([]byte{0x08, 0x00}, new(GoEnum)); err != nil { - t.Errorf("Unmarshal = %v, want nil", err) - } - // Required field expects a varint, but found a fixed32 instead. - if err := Unmarshal([]byte{0x0d, 0x00, 0x00, 0x00, 0x00}, new(GoEnum)); err == nil { - t.Errorf("Unmarshal = nil, want RequiredNotSetError") - } - // Required field expects a varint, and found both a varint and fixed32 (ignored). - m := new(GoEnum) - if err := Unmarshal([]byte{0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00}, m); err != nil { - t.Errorf("Unmarshal = %v, want nil", err) - } - if !bytes.Equal(m.XXX_unrecognized, []byte{0x0d, 0x00, 0x00, 0x00, 0x00}) { - t.Errorf("expected fixed32 to appear as unknown bytes: %x", m.XXX_unrecognized) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. 
- parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldDeterministicMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - - marshal := func(m Message) []byte { - var b Buffer - b.SetDeterministic(true) - if err := b.Marshal(m); err != nil { - t.Fatalf("Marshal failed: %v", err) - } - return b.Bytes() - } - - want := marshal(m) - for i := 0; i < 10; i++ { - if got := marshal(m); !bytes.Equal(got, want) { - t.Errorf("Marshal produced inconsistent output with determinism enabled (pass %d).\n got %v\nwant %v", i, got, want) - } - } -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: {F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(m, m2) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", m, m2) - } -} - -func TestMapFieldWithNil(t *testing.T) { - m1 := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: 
%v", err) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.MsgMapping[1]; !ok { - t.Error("msg_mapping[1] not present") - } else if v != nil { - t.Errorf("msg_mapping[1] not nil: %v", v) - } -} - -func TestMapFieldWithNilBytes(t *testing.T) { - m1 := &MessageWithMap{ - ByteMapping: map[bool][]byte{ - false: {}, - true: nil, - }, - } - n := Size(m1) - b, err := Marshal(m1) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if n != len(b) { - t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) - } - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) - } - if v, ok := m2.ByteMapping[false]; !ok { - t.Error("byte_mapping[false] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[false] not empty: %#v", v) - } - if v, ok := m2.ByteMapping[true]; !ok { - t.Error("byte_mapping[true] not present") - } else if len(v) != 0 { - t.Errorf("byte_mapping[true] not empty: %#v", v) - } -} - -func TestDecodeMapFieldMissingKey(t *testing.T) { - b := []byte{ - 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes - // no key - 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing key: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no key was not as expected. 
got: %v, want %v", got, want) - } -} - -func TestDecodeMapFieldMissingValue(t *testing.T) { - b := []byte{ - 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes - 0x08, 0x01, // varint key, value 1 - // no value - } - got := &MessageWithMap{} - err := Unmarshal(b, got) - if err != nil { - t.Fatalf("failed to marshal map with missing value: %v", err) - } - want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} - if !Equal(got, want) { - t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) - } -} - -func TestOneof(t *testing.T) { - m := &Communique{} - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal of empty message with oneof: %v", err) - } - if len(b) != 0 { - t.Errorf("Marshal of empty message yielded too many bytes: %v", b) - } - - m = &Communique{ - Union: &Communique_Name{"Barry"}, - } - - // Round-trip. - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof: %v", err) - } - if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) - t.Errorf("Incorrect marshal of message with oneof: %v", b) - } - m.Reset() - if err := Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof: %v", err) - } - if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { - t.Errorf("After round trip, Union = %+v", m.Union) - } - if name := m.GetName(); name != "Barry" { - t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") - } - - // Let's try with a message in the oneof. 
- m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} - b, err = Marshal(m) - if err != nil { - t.Fatalf("Marshal of message with oneof set to message: %v", err) - } - if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) - t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) - } - m.Reset() - if err := Unmarshal(b, m); err != nil { - t.Fatalf("Unmarshal of message with oneof set to message: %v", err) - } - ss, ok := m.Union.(*Communique_Msg) - if !ok || ss.Msg.GetStringField() != "deep deep string" { - t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) - } -} - -func TestOneofNilBytes(t *testing.T) { - // A oneof with nil byte slice should marshal to tag + 0 (size), with no error. - m := &Communique{Union: &Communique_Data{Data: nil}} - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal failed: %v", err) - } - want := []byte{ - 7<<3 | 2, // tag 7, wire type 2 - 0, // size - } - if !bytes.Equal(b, want) { - t.Errorf("Wrong result of Marshal: got %x, want %x", b, want) - } -} - -func TestInefficientPackedBool(t *testing.T) { - // https://github.com/golang/protobuf/issues/76 - inp := []byte{ - 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes - // Usually a bool should take a single byte, - // but it is permitted to be any varint. - 0xb9, 0x30, - } - if err := Unmarshal(inp, new(MoreRepeated)); err != nil { - t.Error(err) - } -} - -// Make sure pure-reflect-based implementation handles -// []int32-[]enum conversion correctly. 
-func TestRepeatedEnum2(t *testing.T) { - pb := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - b, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal failed: %v", err) - } - x := new(RepeatedEnum) - err = Unmarshal(b, x) - if err != nil { - t.Fatalf("Unmarshal failed: %v", err) - } - if !Equal(pb, x) { - t.Errorf("Incorrect result: want: %v got: %v", pb, x) - } -} - -// TestConcurrentMarshal makes sure that it is safe to marshal -// same message in multiple goroutines concurrently. -func TestConcurrentMarshal(t *testing.T) { - pb := initGoTest(true) - const N = 100 - b := make([][]byte, N) - - var wg sync.WaitGroup - for i := 0; i < N; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - var err error - b[i], err = Marshal(pb) - if err != nil { - t.Errorf("marshal error: %v", err) - } - }(i) - } - - wg.Wait() - for i := 1; i < N; i++ { - if !bytes.Equal(b[0], b[i]) { - t.Errorf("concurrent marshal result not same: b[0] = %v, b[%d] = %v", b[0], i, b[i]) - } - } -} - -func TestInvalidUTF8(t *testing.T) { - const invalidUTF8 = "\xde\xad\xbe\xef\x80\x00\xff" - tests := []struct { - label string - proto2 Message - proto3 Message - want []byte - }{{ - label: "Scalar", - proto2: &TestUTF8{Scalar: String(invalidUTF8)}, - proto3: &pb3.TestUTF8{Scalar: invalidUTF8}, - want: []byte{0x0a, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff}, - }, { - label: "Vector", - proto2: &TestUTF8{Vector: []string{invalidUTF8}}, - proto3: &pb3.TestUTF8{Vector: []string{invalidUTF8}}, - want: []byte{0x12, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff}, - }, { - label: "Oneof", - proto2: &TestUTF8{Oneof: &TestUTF8_Field{invalidUTF8}}, - proto3: &pb3.TestUTF8{Oneof: &pb3.TestUTF8_Field{invalidUTF8}}, - want: []byte{0x1a, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff}, - }, { - label: "MapKey", - proto2: &TestUTF8{MapKey: map[string]int64{invalidUTF8: 0}}, - proto3: &pb3.TestUTF8{MapKey: map[string]int64{invalidUTF8: 0}}, - want: []byte{0x22, 0x0b, 0x0a, 0x07, 
0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff, 0x10, 0x00}, - }, { - label: "MapValue", - proto2: &TestUTF8{MapValue: map[int64]string{0: invalidUTF8}}, - proto3: &pb3.TestUTF8{MapValue: map[int64]string{0: invalidUTF8}}, - want: []byte{0x2a, 0x0b, 0x08, 0x00, 0x12, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff}, - }} - - for _, tt := range tests { - // Proto2 should not validate UTF-8. - b, err := Marshal(tt.proto2) - if err != nil { - t.Errorf("Marshal(proto2.%s) = %v, want nil", tt.label, err) - } - if !bytes.Equal(b, tt.want) { - t.Errorf("Marshal(proto2.%s) = %x, want %x", tt.label, b, tt.want) - } - - m := Clone(tt.proto2) - m.Reset() - if err = Unmarshal(tt.want, m); err != nil { - t.Errorf("Unmarshal(proto2.%s) = %v, want nil", tt.label, err) - } - if !Equal(m, tt.proto2) { - t.Errorf("proto2.%s: output mismatch:\ngot %v\nwant %v", tt.label, m, tt.proto2) - } - - // Proto3 should validate UTF-8. - b, err = Marshal(tt.proto3) - if err == nil { - t.Errorf("Marshal(proto3.%s) = %v, want non-nil", tt.label, err) - } - if !bytes.Equal(b, tt.want) { - t.Errorf("Marshal(proto3.%s) = %x, want %x", tt.label, b, tt.want) - } - - m = Clone(tt.proto3) - m.Reset() - err = Unmarshal(tt.want, m) - if err == nil { - t.Errorf("Unmarshal(proto3.%s) = %v, want non-nil", tt.label, err) - } - if !Equal(m, tt.proto3) { - t.Errorf("proto3.%s: output mismatch:\ngot %v\nwant %v", tt.label, m, tt.proto2) - } - } -} - -func TestRequired(t *testing.T) { - // The F_BoolRequired field appears after all of the required fields. - // It should still be handled even after multiple required field violations. 
- m := &GoTest{F_BoolRequired: Bool(true)} - got, err := Marshal(m) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("Marshal() = %v, want RequiredNotSetError error", err) - } - if want := []byte{0x50, 0x01}; !bytes.Equal(got, want) { - t.Errorf("Marshal() = %x, want %x", got, want) - } - - m = new(GoTest) - err = Unmarshal(got, m) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("Marshal() = %v, want RequiredNotSetError error", err) - } - if !m.GetF_BoolRequired() { - t.Error("m.F_BoolRequired = false, want true") - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. - pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - 
unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go deleted file mode 100644 index 56fc97c1..00000000 --- a/vendor/github.com/golang/protobuf/proto/any_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// 
Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - pb "github.com/golang/protobuf/proto/proto3_proto" - testpb "github.com/golang/protobuf/proto/test_proto" - anypb "github.com/golang/protobuf/ptypes/any" -) - -var ( - expandedMarshaler = proto.TextMarshaler{ExpandAny: true} - expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} -) - -// anyEqual reports whether two messages which may be google.protobuf.Any or may -// contain google.protobuf.Any fields are equal. We can't use proto.Equal for -// comparison, because semantically equivalent messages may be marshaled to -// binary in different tag order. Instead, trust that TextMarshaler with -// ExpandAny option works and compare the text marshaling results. -func anyEqual(got, want proto.Message) bool { - // if messages are proto.Equal, no need to marshal. - if proto.Equal(got, want) { - return true - } - g := expandedMarshaler.Text(got) - w := expandedMarshaler.Text(want) - return g == w -} - -type golden struct { - m proto.Message - t, c string -} - -var goldenMessages = makeGolden() - -func makeGolden() []golden { - nested := &pb.Nested{Bunny: "Monty"} - nb, err := proto.Marshal(nested) - if err != nil { - panic(err) - } - m1 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m2 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, - } - m3 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, - } - m4 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, - } - m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), 
Value: nb} - - any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} - proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) - proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) - any1b, err := proto.Marshal(any1) - if err != nil { - panic(err) - } - any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} - proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) - any2b, err := proto.Marshal(any2) - if err != nil { - panic(err) - } - m6 := &pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - ManyThings: []*anypb.Any{ - &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, - &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, - }, - } - - const ( - m1Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m2Golden = ` -name: "David" -result_count: 47 -anything: < - ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m3Golden = ` -name: "David" -result_count: 47 -anything: < - ["type.googleapis.com/\"/proto3_proto.Nested"]: < - bunny: "Monty" - > -> -` - m4Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > -> -` - m5Golden = ` -[type.googleapis.com/proto3_proto.Nested]: < - bunny: "Monty" -> -` - m6Golden = ` -name: "David" -result_count: 47 -anything: < - [type.googleapis.com/test_proto.MyMessage]: < - count: 47 - name: "David" - [test_proto.Ext.more]: < - data: "foo" - > - [test_proto.Ext.text]: "bar" - > -> -many_things: < - [type.googleapis.com/test_proto.MyMessage]: < - count: 42 - bikeshed: GREEN - rep_bytes: "roboto" - [test_proto.Ext.more]: < - 
data: "baz" - > - > -> -many_things: < - [type.googleapis.com/test_proto.MyMessage]: < - count: 47 - name: "David" - [test_proto.Ext.more]: < - data: "foo" - > - [test_proto.Ext.text]: "bar" - > -> -` - ) - return []golden{ - {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, - {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, - {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "}, - {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, - {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, - {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, - } -} - -func TestMarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { - t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) - } - if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { - t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) - } - } -} - -func TestUnmarshalGolden(t *testing.T) { - for _, tt := range goldenMessages { - want := tt.m - got := proto.Clone(tt.m) - got.Reset() - if err := proto.UnmarshalText(tt.t, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) - } - got.Reset() - if err := proto.UnmarshalText(tt.c, got); err != nil { - t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) - } - if !anyEqual(got, want) { - t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) - } - } -} - -func TestMarshalUnknownAny(t *testing.T) { - m := &pb.Message{ - Anything: &anypb.Any{ - TypeUrl: "foo", - Value: []byte("bar"), - }, - } - want := `anything: < - type_url: "foo" - value: "bar" -> -` - got := expandedMarshaler.Text(m) - if got != want { - 
t.Errorf("got\n`%s`\nwant\n`%s`", got, want) - } -} - -func TestAmbiguousAny(t *testing.T) { - pb := &anypb.Any{} - err := proto.UnmarshalText(` - type_url: "ttt/proto3_proto.Nested" - value: "\n\x05Monty" - `, pb) - t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) - if err != nil { - t.Errorf("failed to parse ambiguous Any message: %v", err) - } -} - -func TestUnmarshalOverwriteAny(t *testing.T) { - pb := &anypb.Any{} - err := proto.UnmarshalText(` - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Monty" - > - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 7: Any message unpacked multiple times, or "type_url" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} - -func TestUnmarshalAnyMixAndMatch(t *testing.T) { - pb := &anypb.Any{} - err := proto.UnmarshalText(` - value: "\n\x05Monty" - [type.googleapis.com/a/path/proto3_proto.Nested]: < - bunny: "Rabbit of Caerbannog" - > - `, pb) - want := `line 5: Any message unpacked multiple times, or "value" already set` - if err.Error() != want { - t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 00000000..e810e6fe --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. 
It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. 
This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. 
+func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) 
+ } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). 
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f..00000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. 
-type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. 
- if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go deleted file mode 100644 index 0d3b1273..00000000 --- a/vendor/github.com/golang/protobuf/proto/clone_test.go +++ /dev/null @@ -1,390 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/test_proto" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Fatalf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - 
}, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. - { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(3.0), - Exact: proto.Bool(true), - }, // the entire message should be overwritten - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - 0x4002: &pb.FloatingPoint{ - F: proto.Float64(2.0), - }, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. - { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, - { // Oneof fields should merge by assignment. - src: &pb.Communique{Union: &pb.Communique_Number{41}}, - dst: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, - want: &pb.Communique{Union: &pb.Communique_Number{41}}, - }, - { // Oneof nil is the same as not set. 
- src: &pb.Communique{}, - dst: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, - want: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, - }, - { - src: &pb.Communique{Union: &pb.Communique_Number{1337}}, - dst: &pb.Communique{}, - want: &pb.Communique{Union: &pb.Communique_Number{1337}}, - }, - { - src: &pb.Communique{Union: &pb.Communique_Col{pb.MyMessage_RED}}, - dst: &pb.Communique{}, - want: &pb.Communique{Union: &pb.Communique_Col{pb.MyMessage_RED}}, - }, - { - src: &pb.Communique{Union: &pb.Communique_Data{[]byte("hello")}}, - dst: &pb.Communique{}, - want: &pb.Communique{Union: &pb.Communique_Data{[]byte("hello")}}, - }, - { - src: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}}, - dst: &pb.Communique{}, - want: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}}, - }, - { - src: &pb.Communique{Union: &pb.Communique_Msg{}}, - dst: &pb.Communique{}, - want: &pb.Communique{Union: &pb.Communique_Msg{}}, - }, - { - src: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123")}}}, - dst: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}}, - want: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123"), BytesField: []byte{1, 2, 3}}}}, - }, - { - src: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Cute: true}, // replace - "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert - }, - }, - dst: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced - "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep - }, - }, - want: &proto3pb.Message{ - Terrain: map[string]*proto3pb.Nested{ - "kay_a": &proto3pb.Nested{Cute: true}, - "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, - "kay_c": &proto3pb.Nested{Bunny: "bunny"}, - }, - }, - }, - { - src: &pb.GoTest{ - F_BoolRepeated: []bool{}, - F_Int32Repeated: 
[]int32{}, - F_Int64Repeated: []int64{}, - F_Uint32Repeated: []uint32{}, - F_Uint64Repeated: []uint64{}, - F_FloatRepeated: []float32{}, - F_DoubleRepeated: []float64{}, - F_StringRepeated: []string{}, - F_BytesRepeated: [][]byte{}, - }, - dst: &pb.GoTest{}, - want: &pb.GoTest{ - F_BoolRepeated: []bool{}, - F_Int32Repeated: []int32{}, - F_Int64Repeated: []int64{}, - F_Uint32Repeated: []uint32{}, - F_Uint64Repeated: []uint64{}, - F_FloatRepeated: []float32{}, - F_DoubleRepeated: []float64{}, - F_StringRepeated: []string{}, - F_BytesRepeated: [][]byte{}, - }, - }, - { - src: &pb.GoTest{}, - dst: &pb.GoTest{ - F_BoolRepeated: []bool{}, - F_Int32Repeated: []int32{}, - F_Int64Repeated: []int64{}, - F_Uint32Repeated: []uint32{}, - F_Uint64Repeated: []uint64{}, - F_FloatRepeated: []float32{}, - F_DoubleRepeated: []float64{}, - F_StringRepeated: []string{}, - F_BytesRepeated: [][]byte{}, - }, - want: &pb.GoTest{ - F_BoolRepeated: []bool{}, - F_Int32Repeated: []int32{}, - F_Int64Repeated: []int64{}, - F_Uint32Repeated: []uint32{}, - F_Uint64Repeated: []uint64{}, - F_FloatRepeated: []float32{}, - F_DoubleRepeated: []float64{}, - F_StringRepeated: []string{}, - F_BytesRepeated: [][]byte{}, - }, - }, - { - src: &pb.GoTest{ - F_BytesRepeated: [][]byte{nil, []byte{}, []byte{0}}, - }, - dst: &pb.GoTest{}, - want: &pb.GoTest{ - F_BytesRepeated: [][]byte{nil, []byte{}, []byte{0}}, - }, - }, - { - src: &pb.MyMessage{ - Others: []*pb.OtherMessage{}, - }, - dst: &pb.MyMessage{}, - want: &pb.MyMessage{ - Others: []*pb.OtherMessage{}, - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - if !proto.Equal(got, m.dst) { - t.Errorf("Clone()\ngot %v\nwant %v", got, m.dst) - continue - } - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\ngot %v\nwant %v", m.dst, m.src, got, m.want) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go 
b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index d9aa3c42..00000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,428 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. 
- */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. 
-func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. 
- // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go deleted file mode 100644 index 949be3ab..00000000 --- a/vendor/github.com/golang/protobuf/proto/decode_test.go +++ /dev/null @@ -1,255 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "fmt" - "testing" - - "github.com/golang/protobuf/proto" - tpb "github.com/golang/protobuf/proto/proto3_proto" -) - -var msgBlackhole = new(tpb.Message) - -// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and -// 2 bytes long). -func BenchmarkVarint32ArraySmall(b *testing.B) { - for i := uint(1); i <= 10; i++ { - dist := genInt32Dist([7]int{0, 3, 1}, 1< 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/discard_test.go b/vendor/github.com/golang/protobuf/proto/discard_test.go deleted file mode 100644 index a2ff5509..00000000 --- a/vendor/github.com/golang/protobuf/proto/discard_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/test_proto" -) - -func TestDiscardUnknown(t *testing.T) { - tests := []struct { - desc string - in, want proto.Message - }{{ - desc: "Nil", - in: nil, want: nil, // Should not panic - }, { - desc: "NilPtr", - in: (*proto3pb.Message)(nil), want: (*proto3pb.Message)(nil), // Should not panic - }, { - desc: "Nested", - in: &proto3pb.Message{ - Name: "Aaron", - Nested: &proto3pb.Nested{Cute: true, XXX_unrecognized: []byte("blah")}, - XXX_unrecognized: []byte("blah"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - Nested: &proto3pb.Nested{Cute: true}, - }, - }, { - desc: "Slice", - in: &proto3pb.Message{ - Name: "Aaron", - Children: []*proto3pb.Message{ - {Name: "Sarah", XXX_unrecognized: []byte("blah")}, - {Name: "Abraham", XXX_unrecognized: []byte("blah")}, - }, - XXX_unrecognized: []byte("blah"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - Children: []*proto3pb.Message{ - {Name: "Sarah"}, - {Name: "Abraham"}, - }, - }, - }, { - desc: "OneOf", - in: &pb.Communique{ - Union: &pb.Communique_Msg{&pb.Strings{ - StringField: proto.String("123"), - XXX_unrecognized: []byte("blah"), - }}, - XXX_unrecognized: []byte("blah"), - }, - want: &pb.Communique{ - Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123")}}, - }, - }, { - desc: "Map", - in: &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: &pb.FloatingPoint{ - Exact: proto.Bool(true), - XXX_unrecognized: []byte("blah"), - }, - }}, - want: &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4002: &pb.FloatingPoint{Exact: proto.Bool(true)}, - }}, - }, { - desc: "Extension", - in: func() proto.Message { - m := &pb.MyMessage{ - Count: proto.Int32(42), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - XXX_unrecognized: []byte("blah"), - }, - XXX_unrecognized: 
[]byte("blah"), - } - proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{ - Data: proto.String("extension"), - XXX_unrecognized: []byte("blah"), - }) - return m - }(), - want: func() proto.Message { - m := &pb.MyMessage{ - Count: proto.Int32(42), - Somegroup: &pb.MyMessage_SomeGroup{GroupField: proto.Int32(6)}, - } - proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{Data: proto.String("extension")}) - return m - }(), - }} - - // Test the legacy code path. - for _, tt := range tests { - // Clone the input so that we don't alter the original. - in := tt.in - if in != nil { - in = proto.Clone(tt.in) - } - - var m LegacyMessage - m.Message, _ = in.(*proto3pb.Message) - m.Communique, _ = in.(*pb.Communique) - m.MessageWithMap, _ = in.(*pb.MessageWithMap) - m.MyMessage, _ = in.(*pb.MyMessage) - proto.DiscardUnknown(&m) - if !proto.Equal(in, tt.want) { - t.Errorf("test %s/Legacy, expected unknown fields to be discarded\ngot %v\nwant %v", tt.desc, in, tt.want) - } - } - - for _, tt := range tests { - proto.DiscardUnknown(tt.in) - if !proto.Equal(tt.in, tt.want) { - t.Errorf("test %s, expected unknown fields to be discarded\ngot %v\nwant %v", tt.desc, tt.in, tt.want) - } - } -} - -// LegacyMessage is a proto.Message that has several nested messages. -// This does not have the XXX_DiscardUnknown method and so forces DiscardUnknown -// to use the legacy fallback logic. 
-type LegacyMessage struct { - Message *proto3pb.Message - Communique *pb.Communique - MessageWithMap *pb.MessageWithMap - MyMessage *pb.MyMessage -} - -func (m *LegacyMessage) Reset() { *m = LegacyMessage{} } -func (m *LegacyMessage) String() string { return proto.CompactTextString(m) } -func (*LegacyMessage) ProtoMessage() {} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2c..00000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. 
-func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. 
-// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go deleted file mode 100644 index a7209475..00000000 --- a/vendor/github.com/golang/protobuf/proto/encode_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.7 - -package proto_test - -import ( - "strconv" - "testing" - - "github.com/golang/protobuf/proto" - tpb "github.com/golang/protobuf/proto/proto3_proto" - "github.com/golang/protobuf/ptypes" -) - -var ( - blackhole []byte -) - -// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the -// same. 
-func BenchmarkAny(b *testing.B) { - data := make([]byte, 1<<20) - quantum := 1 << 10 - for i := uint(0); i <= 10; i++ { - b.Run(strconv.Itoa(quantum< 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte + return has } -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. 
+func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. 
- if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. 
+ xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) } - return e.value, nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: + return nil, ErrMissingExtension } - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil + return v, nil } -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. 
+type extensionResolver struct{ xt protoreflect.ExtensionType } - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } + return protoregistry.GlobalTypes.FindExtensionByName(field) +} - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. 
- value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) + if err != nil { + if err == ErrMissingExtension { + continue + } + return vs, err + } + vs[i] = v } - return value.Interface(), nil + return vs, nil } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable + } - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", v, xt.ExtensionType) + } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } + } - if len(b) == 0 { - break - } + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - return value.Interface(), nil + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) + return nil } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) } + b0 = b0[n:] } - return -} -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd } } - - extensions = append(extensions, desc) + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. 
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } + return xts, nil +} - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. 
+func isScalarKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + return true + default: + return false } } -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) } - m[desc.Field] = desc } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go deleted file mode 100644 index dc69fe97..00000000 --- a/vendor/github.com/golang/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,688 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/test_proto" - "golang.org/x/sync/errgroup" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestGetExtensionWithEmptyBuffer(t *testing.T) { - // Make sure that GetExtension returns an error if its - // undecoded buffer is empty. 
- msg := &pb.MyMessage{} - proto.SetRawExtension(msg, pb.E_Ext_More.Field, []byte{}) - _, err := proto.GetExtension(msg, pb.E_Ext_More) - if want := io.ErrUnexpectedEOF; err != want { - t.Errorf("unexpected error in GetExtension from empty buffer: got %v, want %v", err, want) - } -} - -func TestGetExtensionForIncompleteDesc(t *testing.T) { - msg := &pb.MyMessage{Count: proto.Int32(0)} - extdesc1 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 123456789, - Name: "a.b", - Tag: "varint,123456789,opt", - } - ext1 := proto.Bool(true) - if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - extdesc2 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 123456790, - Name: "a.c", - Tag: "bytes,123456790,opt", - } - ext2 := []byte{0, 1, 2, 3, 4, 5, 6, 7} - if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { - t.Fatalf("Could not set ext2: %s", err) - } - extdesc3 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*pb.Ext)(nil), - Field: 123456791, - Name: "a.d", - Tag: "bytes,123456791,opt", - } - ext3 := &pb.Ext{Data: proto.String("foo")} - if err := proto.SetExtension(msg, extdesc3, ext3); err != nil { - t.Fatalf("Could not set ext3: %s", err) - } - - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Could not marshal msg: %v", err) - } - if err := proto.Unmarshal(b, msg); err != nil { - t.Fatalf("Could not unmarshal into msg: %v", err) - } - - var expected proto.Buffer - if err := expected.EncodeVarint(uint64((extdesc1.Field << 3) | proto.WireVarint)); err != nil { - t.Fatalf("failed to compute expected prefix for ext1: %s", err) - } - if err := expected.EncodeVarint(1 /* bool true */); err != nil { - t.Fatalf("failed to compute expected value for ext1: %s", err) - } - - if b, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: extdesc1.Field}); err != 
nil { - t.Fatalf("Failed to get raw value for ext1: %s", err) - } else if !reflect.DeepEqual(b, expected.Bytes()) { - t.Fatalf("Raw value for ext1: got %v, want %v", b, expected.Bytes()) - } - - expected = proto.Buffer{} // reset - if err := expected.EncodeVarint(uint64((extdesc2.Field << 3) | proto.WireBytes)); err != nil { - t.Fatalf("failed to compute expected prefix for ext2: %s", err) - } - if err := expected.EncodeRawBytes(ext2); err != nil { - t.Fatalf("failed to compute expected value for ext2: %s", err) - } - - if b, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: extdesc2.Field}); err != nil { - t.Fatalf("Failed to get raw value for ext2: %s", err) - } else if !reflect.DeepEqual(b, expected.Bytes()) { - t.Fatalf("Raw value for ext2: got %v, want %v", b, expected.Bytes()) - } - - expected = proto.Buffer{} // reset - if err := expected.EncodeVarint(uint64((extdesc3.Field << 3) | proto.WireBytes)); err != nil { - t.Fatalf("failed to compute expected prefix for ext3: %s", err) - } - if b, err := proto.Marshal(ext3); err != nil { - t.Fatalf("failed to compute expected value for ext3: %s", err) - } else if err := expected.EncodeRawBytes(b); err != nil { - t.Fatalf("failed to compute expected value for ext3: %s", err) - } - - if b, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: extdesc3.Field}); err != nil { - t.Fatalf("Failed to get raw value for ext3: %s", err) - } else if !reflect.DeepEqual(b, expected.Bytes()) { - t.Fatalf("Raw value for ext3: got %v, want %v", b, expected.Bytes()) - } -} - -func TestExtensionDescsWithUnregisteredExtensions(t *testing.T) { - msg := &pb.MyMessage{Count: proto.Int32(0)} - extdesc1 := pb.E_Ext_More - if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { - t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) - } - - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", err) - } - 
extdesc2 := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 123456789, - Name: "a.b", - Tag: "varint,123456789,opt", - } - ext2 := proto.Bool(false) - if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { - t.Fatalf("Could not set ext2: %s", err) - } - - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Could not marshal msg: %v", err) - } - if err := proto.Unmarshal(b, msg); err != nil { - t.Fatalf("Could not unmarshal into msg: %v", err) - } - - descs, err := proto.ExtensionDescs(msg) - if err != nil { - t.Fatalf("proto.ExtensionDescs: got error %v", err) - } - sortExtDescs(descs) - wantDescs := []*proto.ExtensionDesc{extdesc1, {Field: extdesc2.Field}} - if !reflect.DeepEqual(descs, wantDescs) { - t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) - } -} - -type ExtensionDescSlice []*proto.ExtensionDesc - -func (s ExtensionDescSlice) Len() int { return len(s) } -func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } -func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func sortExtDescs(s []*proto.ExtensionDesc) { - sort.Sort(ExtensionDescSlice(s)) -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = 
proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 uint32 = 5 - var setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). - } - tests := []testcase{ - {pb.E_NoDefaultDouble, setFloat64, nil}, - {pb.E_NoDefaultFloat, setFloat32, nil}, - {pb.E_NoDefaultInt32, setInt32, nil}, - {pb.E_NoDefaultInt64, setInt64, nil}, - {pb.E_NoDefaultUint32, setUint32, nil}, - {pb.E_NoDefaultUint64, setUint64, nil}, - {pb.E_NoDefaultSint32, setInt32, nil}, - {pb.E_NoDefaultSint64, setInt64, nil}, - {pb.E_NoDefaultFixed32, setUint32, nil}, - {pb.E_NoDefaultFixed64, setUint64, nil}, - {pb.E_NoDefaultSfixed32, setInt32, nil}, - {pb.E_NoDefaultSfixed64, setInt64, nil}, - {pb.E_NoDefaultBool, setBool, nil}, - {pb.E_NoDefaultBool, setBool2, nil}, - {pb.E_NoDefaultString, setString, nil}, - {pb.E_NoDefaultBytes, setBytes, nil}, - {pb.E_NoDefaultEnum, setEnum, nil}, - {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, - {pb.E_DefaultFloat, setFloat32, float32(3.14)}, - {pb.E_DefaultInt32, setInt32, int32(42)}, - {pb.E_DefaultInt64, setInt64, int64(43)}, - {pb.E_DefaultUint32, setUint32, uint32(44)}, - {pb.E_DefaultUint64, setUint64, uint64(45)}, - {pb.E_DefaultSint32, setInt32, int32(46)}, - {pb.E_DefaultSint64, setInt64, int64(47)}, - {pb.E_DefaultFixed32, setUint32, uint32(48)}, - {pb.E_DefaultFixed64, setUint64, 
uint64(49)}, - {pb.E_DefaultSfixed32, setInt32, int32(50)}, - {pb.E_DefaultSfixed64, setInt64, int64(51)}, - {pb.E_DefaultBool, setBool, true}, - {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string,def=foo"}, - {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, - {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, - } - - checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { - val, err := proto.GetExtension(msg, test.ext) - if err != nil { - if valWant != nil { - return fmt.Errorf("GetExtension(): %s", err) - } - if want := proto.ErrMissingExtension; err != want { - return fmt.Errorf("Unexpected error: got %v, want %v", err, want) - } - return nil - } - - // All proto2 extension values are either a pointer to a value or a slice of values. - ty := reflect.TypeOf(val) - tyWant := reflect.TypeOf(test.ext.ExtensionType) - if got, want := ty, tyWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) - } - tye := ty.Elem() - tyeWant := tyWant.Elem() - if got, want := tye, tyeWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) - } - - // Check the name of the type of the value. - // If it is an enum it will be type int32 with the name of the enum. - if got, want := tye.Name(), tye.Name(); got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) - } - - // Check that value is what we expect. - // If we have a pointer in val, get the value it points to. 
- valExp := val - if ty.Kind() == reflect.Ptr { - valExp = reflect.ValueOf(val).Elem().Interface() - } - if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { - return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) - } - - return nil - } - - setTo := func(test testcase) interface{} { - setTo := reflect.ValueOf(test.want) - if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { - setTo = reflect.New(typ).Elem() - setTo.Set(reflect.New(setTo.Type().Elem())) - setTo.Elem().Set(reflect.ValueOf(test.want)) - } - return setTo.Interface() - } - - for _, test := range tests { - msg := &pb.DefaultsMessage{} - name := test.ext.Name - - // Check the initial value. - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - - // Set the per-type value and check value. - name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) - if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { - t.Errorf("%s: SetExtension(): %v", name, err) - continue - } - if err := checkVal(test, msg, test.want); err != nil { - t.Errorf("%s: %v", name, err) - continue - } - - // Set and check the value. - name += " (cleared)" - proto.ClearExtension(msg, test.ext) - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - } -} - -func TestNilMessage(t *testing.T) { - name := "nil interface" - if got, err := proto.GetExtension(nil, pb.E_Ext_More); err == nil { - t.Errorf("%s: got %T %v, expected to fail", name, got, got) - } else if !strings.Contains(err.Error(), "extendable") { - t.Errorf("%s: got error %v, expected not-extendable error", name, err) - } - - // Regression tests: all functions of the Extension API - // used to panic when passed (*M)(nil), where M is a concrete message - // type. Now they handle this gracefully as a no-op or reported error. 
- var nilMsg *pb.MyMessage - desc := pb.E_Ext_More - - isNotExtendable := func(err error) bool { - return strings.Contains(fmt.Sprint(err), "not extendable") - } - - if proto.HasExtension(nilMsg, desc) { - t.Error("HasExtension(nil) = true") - } - - if _, err := proto.GetExtensions(nilMsg, []*proto.ExtensionDesc{desc}); !isNotExtendable(err) { - t.Errorf("GetExtensions(nil) = %q (wrong error)", err) - } - - if _, err := proto.ExtensionDescs(nilMsg); !isNotExtendable(err) { - t.Errorf("ExtensionDescs(nil) = %q (wrong error)", err) - } - - if err := proto.SetExtension(nilMsg, desc, nil); !isNotExtendable(err) { - t.Errorf("SetExtension(nil) = %q (wrong error)", err) - } - - proto.ClearExtension(nilMsg, desc) // no-op - proto.ClearAllExtensions(nilMsg) // no-op -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected test_proto.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 
12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := fmt.Sprintf("proto: SetExtension called with nil value of type %T", new(pb.Ext)); err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} - -func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { - // Add a repeated extension to the result. - tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - // Marshal message with a repeated extension. - msg1 := new(pb.OtherMessage) - err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) - if err != nil { - t.Fatalf("[%s] Error setting extension: %v", test.name, err) - } - b, err := proto.Marshal(msg1) - if err != nil { - t.Fatalf("[%s] Error marshaling message: %v", test.name, err) - } - - // Unmarshal and read the merged proto. 
- msg2 := new(pb.OtherMessage) - err = proto.Unmarshal(b, msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_RComplex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.([]*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if len(ext) != len(test.ext) { - t.Errorf("[%s] Wrong length of ComplexExtension: got: %v want: %v\n", test.name, len(ext), len(test.ext)) - } - for i := range test.ext { - if !proto.Equal(ext[i], test.ext[i]) { - t.Errorf("[%s] Wrong value for ComplexExtension[%d]: got: %v want: %v\n", test.name, i, ext[i], test.ext[i]) - } - } - } -} - -func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { - // We may see multiple instances of the same extension in the wire - // format. For example, the proto compiler may encode custom options in - // this way. Here, we verify that we merge the extensions together. - tests := []struct { - name string - ext []*pb.ComplexExtension - }{ - { - "two fields", - []*pb.ComplexExtension{ - {First: proto.Int32(7)}, - {Second: proto.Int32(11)}, - }, - }, - { - "repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {Third: []int32{2000}}, - }, - }, - { - "two fields and repeated field", - []*pb.ComplexExtension{ - {Third: []int32{1000}}, - {First: proto.Int32(9)}, - {Second: proto.Int32(21)}, - {Third: []int32{2000}}, - }, - }, - } - for _, test := range tests { - var buf bytes.Buffer - var want pb.ComplexExtension - - // Generate a serialized representation of a repeated extension - // by catenating bytes together. - for i, e := range test.ext { - // Merge to create the wanted proto. 
- proto.Merge(&want, e) - - // serialize the message - msg := new(pb.OtherMessage) - err := proto.SetExtension(msg, pb.E_Complex, e) - if err != nil { - t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) - } - b, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) - } - buf.Write(b) - } - - // Unmarshal and read the merged proto. - msg2 := new(pb.OtherMessage) - err := proto.Unmarshal(buf.Bytes(), msg2) - if err != nil { - t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) - } - e, err := proto.GetExtension(msg2, pb.E_Complex) - if err != nil { - t.Fatalf("[%s] Error getting extension: %v", test.name, err) - } - ext := e.(*pb.ComplexExtension) - if ext == nil { - t.Fatalf("[%s] Invalid extension", test.name) - } - if !proto.Equal(ext, &want) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, &want) - } - } -} - -func TestClearAllExtensions(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", - } - m := &pb.MyMessage{} - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) - } - if !proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) - } - proto.ClearAllExtensions(m) - if proto.HasExtension(m, desc) { - t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) - } -} - -func TestMarshalRace(t *testing.T) { - ext := &pb.Ext{} - m := &pb.MyMessage{Count: proto.Int32(4)} - if err := proto.SetExtension(m, pb.E_Ext_More, ext); err != nil { - t.Fatalf("proto.SetExtension(m, desc, 
true): got error %q, want nil", err) - } - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("Could not marshal message: %v", err) - } - if err := proto.Unmarshal(b, m); err != nil { - t.Fatalf("Could not unmarshal message: %v", err) - } - // after Unmarshal, the extension is in undecoded form. - // GetExtension will decode it lazily. Make sure this does - // not race against Marshal. - - var g errgroup.Group - for n := 3; n > 0; n-- { - g.Go(func() error { - _, err := proto.Marshal(m) - return err - }) - g.Go(func() error { - _, err := proto.GetExtension(m, pb.E_Ext_More) - return err - }) - } - if err := g.Wait(); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 75565cc6..00000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,979 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. 
They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return 
m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. 
- if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. 
-func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. 
-func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. 
-func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. 
-// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 
%d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero 
default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - 
case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go deleted file mode 100644 index b1e1529e..00000000 --- a/vendor/github.com/golang/protobuf/proto/map_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package proto_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/golang/protobuf/proto" - ppb "github.com/golang/protobuf/proto/proto3_proto" -) - -func TestMap(t *testing.T) { - var b []byte - fmt.Sscanf("a2010c0a044b657931120456616c31a201130a044b657932120556616c3261120456616c32a201240a044b6579330d05000000120556616c33621a0556616c3361120456616c331505000000a20100a201260a044b657934130a07536f6d6555524c1209536f6d655469746c651a08536e69707065743114", "%x", &b) - - var m ppb.Message - if err := proto.Unmarshal(b, &m); err != nil { - t.Fatalf("proto.Unmarshal error: %v", err) - } - - got := m.StringMap - want := map[string]string{ - 
"": "", - "Key1": "Val1", - "Key2": "Val2", - "Key3": "Val3", - "Key4": "", - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("maps differ:\ngot %#v\nwant %#v", got, want) - } -} - -func marshalled() []byte { - m := &ppb.IntMaps{} - for i := 0; i < 1000; i++ { - m.Maps = append(m.Maps, &ppb.IntMap{ - Rtt: map[int32]int32{1: 2}, - }) - } - b, err := proto.Marshal(m) - if err != nil { - panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) - } - return b -} - -func BenchmarkConcurrentMapUnmarshal(b *testing.B) { - in := marshalled() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - var out ppb.IntMaps - if err := proto.Unmarshal(in, &out); err != nil { - b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) - } - } - }) -} - -func BenchmarkSequentialMapUnmarshal(b *testing.B) { - in := marshalled() - b.ResetTimer() - for i := 0; i < b.N; i++ { - var out ppb.IntMaps - if err := proto.Unmarshal(in, &out); err != nil { - b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index 3b6ca41d..00000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,314 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "sync" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. 
- -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. 
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. 
- b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. 
- continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go deleted file mode 100644 index 2c170c5f..00000000 --- a/vendor/github.com/golang/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. 
- in := &messageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - var extensions XXX_InternalExtensions - if err := UnmarshalMessageSet(b, &extensions); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := extensions.p.extensionMap[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} - -func TestMarshalMessageSetJSON_UnknownType(t *testing.T) { - extMap := map[int32]Extension{12345: Extension{}} - got, err := MarshalMessageSetJSON(extMap) - if err != nil { - t.Fatalf("MarshalMessageSetJSON: %v", err) - } - if want := []byte("{}"); !bytes.Equal(got, want) { - t.Errorf("MarshalMessageSetJSON(%v) = %q, want %q", extMap, got, want) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad908..00000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. 
-var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. 
- slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { 
- return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. 
-func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d..00000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. 
-type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. 
-func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. 
-func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 50b99b83..dcdc2202 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,163 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" - "os" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. 
-const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. 
+ // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. 
+ Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. 
+ Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -171,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -190,355 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. 
- if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - 
p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. 
- vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. 
func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - } - // Re-order prop.order. - sort.Sort(prop) + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. + if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field + } - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + prop.Prop = append(prop.Prop, p) } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - // Interpret oneof metadata. + // Construct a mapping of oneof field names to properties. 
+ if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } + prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. 
- // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. 
- log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. 
-func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 00000000..5aee89c3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. 
+type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. +// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. 
+type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. 
+// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go deleted file mode 100644 index 2bd39923..00000000 --- a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto3_proto/proto3.proto - -package proto3_proto - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import test_proto "github.com/golang/protobuf/proto/test_proto" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Message_Humour int32 - -const ( - Message_UNKNOWN Message_Humour = 0 - Message_PUNS Message_Humour = 1 - Message_SLAPSTICK Message_Humour = 2 - Message_BILL_BAILEY Message_Humour = 3 -) - -var Message_Humour_name = map[int32]string{ - 0: "UNKNOWN", - 1: "PUNS", - 2: "SLAPSTICK", - 3: "BILL_BAILEY", -} -var Message_Humour_value = map[string]int32{ - "UNKNOWN": 0, - "PUNS": 1, - "SLAPSTICK": 2, - "BILL_BAILEY": 3, -} - -func (x Message_Humour) String() string { - return proto.EnumName(Message_Humour_name, int32(x)) -} -func (Message_Humour) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{0, 0} -} - -type Message struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,proto3,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm,proto3" json:"height_in_cm,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount,proto3" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman,proto3" json:"true_scotsman,omitempty"` - Score float32 `protobuf:"fixed32,9,opt,name=score,proto3" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,packed,name=key,proto3" json:"key,omitempty"` - ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey,proto3" json:"short_key,omitempty"` - Nested *Nested `protobuf:"bytes,6,opt,name=nested,proto3" json:"nested,omitempty"` - RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,proto3,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` - Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain,proto3" 
json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Proto2Field *test_proto.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field,proto3" json:"proto2_field,omitempty"` - Proto2Value map[string]*test_proto.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value,proto3" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Anything *any.Any `protobuf:"bytes,14,opt,name=anything,proto3" json:"anything,omitempty"` - ManyThings []*any.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings,proto3" json:"many_things,omitempty"` - Submessage *Message `protobuf:"bytes,17,opt,name=submessage,proto3" json:"submessage,omitempty"` - Children []*Message `protobuf:"bytes,18,rep,name=children,proto3" json:"children,omitempty"` - StringMap map[string]string `protobuf:"bytes,20,rep,name=string_map,json=stringMap,proto3" json:"string_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{0} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Message.Unmarshal(m, b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) -} -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) -} -func (m *Message) XXX_Size() int { - return xxx_messageInfo_Message.Size(m) -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - 
-var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Message) GetHilarity() Message_Humour { - if m != nil { - return m.Hilarity - } - return Message_UNKNOWN -} - -func (m *Message) GetHeightInCm() uint32 { - if m != nil { - return m.HeightInCm - } - return 0 -} - -func (m *Message) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *Message) GetResultCount() int64 { - if m != nil { - return m.ResultCount - } - return 0 -} - -func (m *Message) GetTrueScotsman() bool { - if m != nil { - return m.TrueScotsman - } - return false -} - -func (m *Message) GetScore() float32 { - if m != nil { - return m.Score - } - return 0 -} - -func (m *Message) GetKey() []uint64 { - if m != nil { - return m.Key - } - return nil -} - -func (m *Message) GetShortKey() []int32 { - if m != nil { - return m.ShortKey - } - return nil -} - -func (m *Message) GetNested() *Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *Message) GetRFunny() []Message_Humour { - if m != nil { - return m.RFunny - } - return nil -} - -func (m *Message) GetTerrain() map[string]*Nested { - if m != nil { - return m.Terrain - } - return nil -} - -func (m *Message) GetProto2Field() *test_proto.SubDefaults { - if m != nil { - return m.Proto2Field - } - return nil -} - -func (m *Message) GetProto2Value() map[string]*test_proto.SubDefaults { - if m != nil { - return m.Proto2Value - } - return nil -} - -func (m *Message) GetAnything() *any.Any { - if m != nil { - return m.Anything - } - return nil -} - -func (m *Message) GetManyThings() []*any.Any { - if m != nil { - return m.ManyThings - } - return nil -} - -func (m *Message) GetSubmessage() *Message { - if m != nil { - return m.Submessage - } - return nil -} - -func (m *Message) GetChildren() []*Message { - if m != nil { - return m.Children - } - return nil -} - -func (m *Message) GetStringMap() 
map[string]string { - if m != nil { - return m.StringMap - } - return nil -} - -type Nested struct { - Bunny string `protobuf:"bytes,1,opt,name=bunny,proto3" json:"bunny,omitempty"` - Cute bool `protobuf:"varint,2,opt,name=cute,proto3" json:"cute,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} -func (*Nested) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{1} -} -func (m *Nested) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Nested.Unmarshal(m, b) -} -func (m *Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Nested.Marshal(b, m, deterministic) -} -func (dst *Nested) XXX_Merge(src proto.Message) { - xxx_messageInfo_Nested.Merge(dst, src) -} -func (m *Nested) XXX_Size() int { - return xxx_messageInfo_Nested.Size(m) -} -func (m *Nested) XXX_DiscardUnknown() { - xxx_messageInfo_Nested.DiscardUnknown(m) -} - -var xxx_messageInfo_Nested proto.InternalMessageInfo - -func (m *Nested) GetBunny() string { - if m != nil { - return m.Bunny - } - return "" -} - -func (m *Nested) GetCute() bool { - if m != nil { - return m.Cute - } - return false -} - -type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping,proto3" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} -func (*MessageWithMap) Descriptor() ([]byte, []int) { - return 
fileDescriptor_proto3_78ae00cd7e6e5e35, []int{2} -} -func (m *MessageWithMap) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageWithMap.Unmarshal(m, b) -} -func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic) -} -func (dst *MessageWithMap) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageWithMap.Merge(dst, src) -} -func (m *MessageWithMap) XXX_Size() int { - return xxx_messageInfo_MessageWithMap.Size(m) -} -func (m *MessageWithMap) XXX_DiscardUnknown() { - xxx_messageInfo_MessageWithMap.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -type IntMap struct { - Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt,proto3" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntMap) Reset() { *m = IntMap{} } -func (m *IntMap) String() string { return proto.CompactTextString(m) } -func (*IntMap) ProtoMessage() {} -func (*IntMap) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{3} -} -func (m *IntMap) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntMap.Unmarshal(m, b) -} -func (m *IntMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntMap.Marshal(b, m, deterministic) -} -func (dst *IntMap) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntMap.Merge(dst, src) -} -func (m *IntMap) XXX_Size() int { - return xxx_messageInfo_IntMap.Size(m) -} -func (m *IntMap) XXX_DiscardUnknown() { - xxx_messageInfo_IntMap.DiscardUnknown(m) -} - -var xxx_messageInfo_IntMap proto.InternalMessageInfo - -func (m *IntMap) GetRtt() map[int32]int32 
{ - if m != nil { - return m.Rtt - } - return nil -} - -type IntMaps struct { - Maps []*IntMap `protobuf:"bytes,1,rep,name=maps,proto3" json:"maps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntMaps) Reset() { *m = IntMaps{} } -func (m *IntMaps) String() string { return proto.CompactTextString(m) } -func (*IntMaps) ProtoMessage() {} -func (*IntMaps) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{4} -} -func (m *IntMaps) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntMaps.Unmarshal(m, b) -} -func (m *IntMaps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntMaps.Marshal(b, m, deterministic) -} -func (dst *IntMaps) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntMaps.Merge(dst, src) -} -func (m *IntMaps) XXX_Size() int { - return xxx_messageInfo_IntMaps.Size(m) -} -func (m *IntMaps) XXX_DiscardUnknown() { - xxx_messageInfo_IntMaps.DiscardUnknown(m) -} - -var xxx_messageInfo_IntMaps proto.InternalMessageInfo - -func (m *IntMaps) GetMaps() []*IntMap { - if m != nil { - return m.Maps - } - return nil -} - -type TestUTF8 struct { - Scalar string `protobuf:"bytes,1,opt,name=scalar,proto3" json:"scalar,omitempty"` - Vector []string `protobuf:"bytes,2,rep,name=vector,proto3" json:"vector,omitempty"` - // Types that are valid to be assigned to Oneof: - // *TestUTF8_Field - Oneof isTestUTF8_Oneof `protobuf_oneof:"oneof"` - MapKey map[string]int64 `protobuf:"bytes,4,rep,name=map_key,json=mapKey,proto3" json:"map_key,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MapValue map[int64]string `protobuf:"bytes,5,rep,name=map_value,json=mapValue,proto3" json:"map_value,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte 
`json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestUTF8) Reset() { *m = TestUTF8{} } -func (m *TestUTF8) String() string { return proto.CompactTextString(m) } -func (*TestUTF8) ProtoMessage() {} -func (*TestUTF8) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{5} -} -func (m *TestUTF8) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestUTF8.Unmarshal(m, b) -} -func (m *TestUTF8) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestUTF8.Marshal(b, m, deterministic) -} -func (dst *TestUTF8) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestUTF8.Merge(dst, src) -} -func (m *TestUTF8) XXX_Size() int { - return xxx_messageInfo_TestUTF8.Size(m) -} -func (m *TestUTF8) XXX_DiscardUnknown() { - xxx_messageInfo_TestUTF8.DiscardUnknown(m) -} - -var xxx_messageInfo_TestUTF8 proto.InternalMessageInfo - -func (m *TestUTF8) GetScalar() string { - if m != nil { - return m.Scalar - } - return "" -} - -func (m *TestUTF8) GetVector() []string { - if m != nil { - return m.Vector - } - return nil -} - -type isTestUTF8_Oneof interface { - isTestUTF8_Oneof() -} - -type TestUTF8_Field struct { - Field string `protobuf:"bytes,3,opt,name=field,proto3,oneof"` -} - -func (*TestUTF8_Field) isTestUTF8_Oneof() {} - -func (m *TestUTF8) GetOneof() isTestUTF8_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *TestUTF8) GetField() string { - if x, ok := m.GetOneof().(*TestUTF8_Field); ok { - return x.Field - } - return "" -} - -func (m *TestUTF8) GetMapKey() map[string]int64 { - if m != nil { - return m.MapKey - } - return nil -} - -func (m *TestUTF8) GetMapValue() map[int64]string { - if m != nil { - return m.MapValue - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*TestUTF8) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _TestUTF8_OneofMarshaler, _TestUTF8_OneofUnmarshaler, _TestUTF8_OneofSizer, []interface{}{ - (*TestUTF8_Field)(nil), - } -} - -func _TestUTF8_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*TestUTF8) - // oneof - switch x := m.Oneof.(type) { - case *TestUTF8_Field: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Field) - case nil: - default: - return fmt.Errorf("TestUTF8.Oneof has unexpected type %T", x) - } - return nil -} - -func _TestUTF8_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*TestUTF8) - switch tag { - case 3: // oneof.field - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Oneof = &TestUTF8_Field{x} - return true, err - default: - return false, nil - } -} - -func _TestUTF8_OneofSizer(msg proto.Message) (n int) { - m := msg.(*TestUTF8) - // oneof - switch x := m.Oneof.(type) { - case *TestUTF8_Field: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Field))) - n += len(x.Field) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -func init() { - proto.RegisterType((*Message)(nil), "proto3_proto.Message") - proto.RegisterMapType((map[string]*test_proto.SubDefaults)(nil), "proto3_proto.Message.Proto2ValueEntry") - proto.RegisterMapType((map[string]string)(nil), "proto3_proto.Message.StringMapEntry") - proto.RegisterMapType((map[string]*Nested)(nil), "proto3_proto.Message.TerrainEntry") - proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") - proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") - proto.RegisterMapType((map[bool][]byte)(nil), "proto3_proto.MessageWithMap.ByteMappingEntry") - 
proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap") - proto.RegisterMapType((map[int32]int32)(nil), "proto3_proto.IntMap.RttEntry") - proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps") - proto.RegisterType((*TestUTF8)(nil), "proto3_proto.TestUTF8") - proto.RegisterMapType((map[string]int64)(nil), "proto3_proto.TestUTF8.MapKeyEntry") - proto.RegisterMapType((map[int64]string)(nil), "proto3_proto.TestUTF8.MapValueEntry") - proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) -} - -func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor_proto3_78ae00cd7e6e5e35) } - -var fileDescriptor_proto3_78ae00cd7e6e5e35 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x6f, 0x6f, 0xdb, 0xb6, - 0x13, 0xae, 0x2c, 0xff, 0x91, 0xcf, 0x76, 0xea, 0x1f, 0x7f, 0x6e, 0xc7, 0x7a, 0x1b, 0xa0, 0x79, - 0xc3, 0x20, 0x0c, 0xab, 0xb2, 0xb9, 0xc8, 0x90, 0xb5, 0xc5, 0x86, 0x24, 0x6b, 0x50, 0x23, 0xb1, - 0x67, 0xd0, 0xce, 0x82, 0xbd, 0x12, 0x68, 0x87, 0xb6, 0x85, 0x59, 0x94, 0x27, 0x52, 0x05, 0xf4, - 0x05, 0xf6, 0x41, 0xf6, 0x95, 0xf6, 0x85, 0x06, 0x92, 0x72, 0x2a, 0x17, 0xea, 0xf2, 0x4a, 0xbc, - 0x47, 0xcf, 0xdd, 0x73, 0xbc, 0x3b, 0x1e, 0x3c, 0xdb, 0x25, 0xb1, 0x8c, 0x5f, 0x04, 0xfa, 0x73, - 0x6c, 0x0c, 0x5f, 0x7f, 0x50, 0xbb, 0xf8, 0xab, 0xff, 0x6c, 0x1d, 0xc7, 0xeb, 0x2d, 0x33, 0x94, - 0x45, 0xba, 0x3a, 0xa6, 0x3c, 0x33, 0xc4, 0xfe, 0x13, 0xc9, 0x84, 0xcc, 0x23, 0xa8, 0xa3, 0x81, - 0x07, 0x7f, 0x35, 0xa1, 0x31, 0x66, 0x42, 0xd0, 0x35, 0x43, 0x08, 0xaa, 0x9c, 0x46, 0x0c, 0x5b, - 0xae, 0xe5, 0x35, 0x89, 0x3e, 0xa3, 0x53, 0x70, 0x36, 0xe1, 0x96, 0x26, 0xa1, 0xcc, 0x70, 0xc5, - 0xb5, 0xbc, 0xa3, 0xe1, 0x67, 0x7e, 0x51, 0xd2, 0xcf, 0x9d, 0xfd, 0xb7, 0x69, 0x14, 0xa7, 0x09, - 0xb9, 0x67, 0x23, 0x17, 0xda, 0x1b, 0x16, 0xae, 0x37, 0x32, 0x08, 0x79, 0xb0, 0x8c, 0xb0, 0xed, - 0x5a, 0x5e, 0x87, 0x80, 0xc1, 0x46, 0xfc, 0x22, 0x52, 0x7a, 0x77, 0x54, 
0x52, 0x5c, 0x75, 0x2d, - 0xaf, 0x4d, 0xf4, 0x19, 0x7d, 0x01, 0xed, 0x84, 0x89, 0x74, 0x2b, 0x83, 0x65, 0x9c, 0x72, 0x89, - 0x1b, 0xae, 0xe5, 0xd9, 0xa4, 0x65, 0xb0, 0x0b, 0x05, 0xa1, 0x2f, 0xa1, 0x23, 0x93, 0x94, 0x05, - 0x62, 0x19, 0x4b, 0x11, 0x51, 0x8e, 0x1d, 0xd7, 0xf2, 0x1c, 0xd2, 0x56, 0xe0, 0x2c, 0xc7, 0x50, - 0x0f, 0x6a, 0x62, 0x19, 0x27, 0x0c, 0x37, 0x5d, 0xcb, 0xab, 0x10, 0x63, 0xa0, 0x2e, 0xd8, 0x7f, - 0xb0, 0x0c, 0xd7, 0x5c, 0xdb, 0xab, 0x12, 0x75, 0x44, 0x9f, 0x42, 0x53, 0x6c, 0xe2, 0x44, 0x06, - 0x0a, 0xff, 0xbf, 0x6b, 0x7b, 0x35, 0xe2, 0x68, 0xe0, 0x8a, 0x65, 0xe8, 0x5b, 0xa8, 0x73, 0x26, - 0x24, 0xbb, 0xc3, 0x75, 0xd7, 0xf2, 0x5a, 0xc3, 0xde, 0xe1, 0xd5, 0x27, 0xfa, 0x1f, 0xc9, 0x39, - 0xe8, 0x04, 0x1a, 0x49, 0xb0, 0x4a, 0x39, 0xcf, 0x70, 0xd7, 0xb5, 0x1f, 0xac, 0x54, 0x3d, 0xb9, - 0x54, 0x5c, 0xf4, 0x1a, 0x1a, 0x92, 0x25, 0x09, 0x0d, 0x39, 0x06, 0xd7, 0xf6, 0x5a, 0xc3, 0x41, - 0xb9, 0xdb, 0xdc, 0x90, 0xde, 0x70, 0x99, 0x64, 0x64, 0xef, 0x82, 0x5e, 0x82, 0x99, 0x80, 0x61, - 0xb0, 0x0a, 0xd9, 0xf6, 0x0e, 0xb7, 0x74, 0xa2, 0x9f, 0xf8, 0xef, 0xbb, 0xed, 0xcf, 0xd2, 0xc5, - 0x2f, 0x6c, 0x45, 0xd3, 0xad, 0x14, 0xa4, 0x65, 0xc8, 0x97, 0x8a, 0x8b, 0x46, 0xf7, 0xbe, 0xef, - 0xe8, 0x36, 0x65, 0xb8, 0xa3, 0xe5, 0xbf, 0x2e, 0x97, 0x9f, 0x6a, 0xe6, 0x6f, 0x8a, 0x68, 0x52, - 0xc8, 0x43, 0x69, 0x04, 0x7d, 0x07, 0x0e, 0xe5, 0x99, 0xdc, 0x84, 0x7c, 0x8d, 0x8f, 0xf2, 0x5a, - 0x99, 0x59, 0xf4, 0xf7, 0xb3, 0xe8, 0x9f, 0xf1, 0x8c, 0xdc, 0xb3, 0xd0, 0x09, 0xb4, 0x22, 0xca, - 0xb3, 0x40, 0x5b, 0x02, 0x3f, 0xd6, 0xda, 0xe5, 0x4e, 0xa0, 0x88, 0x73, 0xcd, 0x43, 0x27, 0x00, - 0x22, 0x5d, 0x44, 0x26, 0x29, 0xfc, 0x3f, 0x2d, 0xf5, 0xa4, 0x34, 0x63, 0x52, 0x20, 0xa2, 0xef, - 0xc1, 0x59, 0x6e, 0xc2, 0xed, 0x5d, 0xc2, 0x38, 0x46, 0x5a, 0xea, 0x23, 0x4e, 0xf7, 0x34, 0x74, - 0x01, 0x20, 0x64, 0x12, 0xf2, 0x75, 0x10, 0xd1, 0x1d, 0xee, 0x69, 0xa7, 0xaf, 0xca, 0x6b, 0x33, - 0xd3, 0xbc, 0x31, 0xdd, 0x99, 0xca, 0x34, 0xc5, 0xde, 0xee, 0x4f, 0xa1, 0x5d, 0xec, 0xdb, 0x7e, - 0x00, 0xcd, 
0x0b, 0xd3, 0x03, 0xf8, 0x0d, 0xd4, 0x4c, 0xf5, 0x2b, 0xff, 0x31, 0x62, 0x86, 0xf2, - 0xb2, 0x72, 0x6a, 0xf5, 0x6f, 0xa1, 0xfb, 0x61, 0x2b, 0x4a, 0xa2, 0x3e, 0x3f, 0x8c, 0xfa, 0xd1, - 0x79, 0x28, 0x04, 0x7e, 0x0d, 0x47, 0x87, 0xf7, 0x28, 0x09, 0xdb, 0x2b, 0x86, 0x6d, 0x16, 0xbc, - 0x07, 0x3f, 0x43, 0xdd, 0xcc, 0x35, 0x6a, 0x41, 0xe3, 0x66, 0x72, 0x35, 0xf9, 0xf5, 0x76, 0xd2, - 0x7d, 0x84, 0x1c, 0xa8, 0x4e, 0x6f, 0x26, 0xb3, 0xae, 0x85, 0x3a, 0xd0, 0x9c, 0x5d, 0x9f, 0x4d, - 0x67, 0xf3, 0xd1, 0xc5, 0x55, 0xb7, 0x82, 0x1e, 0x43, 0xeb, 0x7c, 0x74, 0x7d, 0x1d, 0x9c, 0x9f, - 0x8d, 0xae, 0xdf, 0xfc, 0xde, 0xb5, 0x07, 0x43, 0xa8, 0x9b, 0xcb, 0x2a, 0x91, 0x85, 0x7e, 0x45, - 0x46, 0xd8, 0x18, 0x6a, 0x59, 0x2c, 0x53, 0x69, 0x94, 0x1d, 0xa2, 0xcf, 0x83, 0xbf, 0x2d, 0x38, - 0xca, 0x7b, 0x70, 0x1b, 0xca, 0xcd, 0x98, 0xee, 0xd0, 0x14, 0xda, 0x8b, 0x4c, 0x32, 0xd5, 0xb3, - 0x9d, 0x1a, 0x46, 0x4b, 0xf7, 0xed, 0x79, 0x69, 0xdf, 0x72, 0x1f, 0xff, 0x3c, 0x93, 0x6c, 0x6c, - 0xf8, 0xf9, 0x68, 0x2f, 0xde, 0x23, 0xfd, 0x9f, 0xa0, 0xfb, 0x21, 0xa1, 0x58, 0x19, 0xa7, 0xa4, - 0x32, 0xed, 0x62, 0x65, 0xfe, 0x84, 0xfa, 0x88, 0x4b, 0x95, 0xdb, 0x31, 0xd8, 0x89, 0x94, 0x79, - 0x4a, 0x9f, 0x1f, 0xa6, 0x64, 0x28, 0x3e, 0x91, 0xd2, 0xa4, 0xa0, 0x98, 0xfd, 0x1f, 0xc0, 0xd9, - 0x03, 0x45, 0xc9, 0x5a, 0x89, 0x64, 0xad, 0x28, 0xf9, 0x02, 0x1a, 0x26, 0x9e, 0x40, 0x1e, 0x54, - 0x23, 0xba, 0x13, 0xb9, 0x68, 0xaf, 0x4c, 0x94, 0x68, 0xc6, 0xe0, 0x9f, 0x0a, 0x38, 0x73, 0x26, - 0xe4, 0xcd, 0xfc, 0xf2, 0x14, 0x3d, 0x85, 0xba, 0x58, 0xd2, 0x2d, 0x4d, 0xf2, 0x26, 0xe4, 0x96, - 0xc2, 0xdf, 0xb1, 0xa5, 0x8c, 0x13, 0x5c, 0x71, 0x6d, 0x85, 0x1b, 0x0b, 0x3d, 0x85, 0x9a, 0xd9, - 0x3f, 0x6a, 0xcb, 0x37, 0xdf, 0x3e, 0x22, 0xc6, 0x44, 0xaf, 0xa0, 0x11, 0xd1, 0x9d, 0x5e, 0xae, - 0xd5, 0xb2, 0xe5, 0xb6, 0x17, 0xf4, 0xc7, 0x74, 0x77, 0xc5, 0x32, 0x73, 0xf7, 0x7a, 0xa4, 0x0d, - 0x74, 0x06, 0x4d, 0xe5, 0x6c, 0x2e, 0x59, 0x2b, 0x7b, 0x80, 0x45, 0xf7, 0xc2, 0x6a, 0x72, 0xa2, - 0xdc, 0xec, 0xff, 0x08, 0xad, 0x42, 0xe4, 0x87, 
0x26, 0xda, 0x2e, 0xbe, 0x87, 0x57, 0xd0, 0x39, - 0x88, 0x5a, 0x74, 0xb6, 0x1f, 0x78, 0x0e, 0xe7, 0x0d, 0xa8, 0xc5, 0x9c, 0xc5, 0xab, 0x45, 0xdd, - 0xe4, 0xfb, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x74, 0x17, 0x7f, 0xc3, 0x07, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto deleted file mode 100644 index 6adea22f..00000000 --- a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ /dev/null @@ -1,97 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "test_proto/test.proto"; - -package proto3_proto; - -message Message { - enum Humour { - UNKNOWN = 0; - PUNS = 1; - SLAPSTICK = 2; - BILL_BAILEY = 3; - } - - string name = 1; - Humour hilarity = 2; - uint32 height_in_cm = 3; - bytes data = 4; - int64 result_count = 7; - bool true_scotsman = 8; - float score = 9; - - repeated uint64 key = 5; - repeated int32 short_key = 19; - Nested nested = 6; - repeated Humour r_funny = 16; - - map terrain = 10; - test_proto.SubDefaults proto2_field = 11; - map proto2_value = 13; - - google.protobuf.Any anything = 14; - repeated google.protobuf.Any many_things = 15; - - Message submessage = 17; - repeated Message children = 18; - - map string_map = 20; -} - -message Nested { - string bunny = 1; - bool cute = 2; -} - -message MessageWithMap { - map byte_mapping = 1; -} - - -message IntMap { - map rtt = 1; -} - -message IntMaps { - repeated IntMap maps = 1; -} - -message TestUTF8 { - string scalar = 1; - repeated string vector = 2; - oneof oneof { string field = 3; } - map map_key = 4; - map map_value = 5; -} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go deleted file mode 100644 index 73eed6c0..00000000 --- a/vendor/github.com/golang/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// Go support for Protocol Buffers - Google's data 
interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "bytes" - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/proto3_proto" - tpb "github.com/golang/protobuf/proto/test_proto" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestGettersForBasicTypesExist(t *testing.T) { - var m pb.Message - if got := m.GetNested().GetBunny(); got != "" { - t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) - } - if got := m.GetNested().GetCute(); got { - t.Errorf("m.GetNested().GetCute() = %t, want false", got) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: 
map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages. - want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} - -func TestUnknownFieldPreservation(t *testing.T) { - b1 := "\x0a\x05David" // Known tag 1 - b2 := "\xc2\x0c\x06Google" // Unknown tag 200 - b := []byte(b1 + b2) - - m := new(pb.Message) - if err := proto.Unmarshal(b, m); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - - if !bytes.Equal(m.XXX_unrecognized, []byte(b2)) { - t.Fatalf("mismatching unknown fields:\ngot %q\nwant %q", m.XXX_unrecognized, b2) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 00000000..1e7ff642 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. 
+type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. 
+ if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. +func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. 
+ var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. 
+func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. 
+ if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. 
+// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. + xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go deleted file mode 100644 index 7846b061..00000000 --- a/vendor/github.com/golang/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. 
- testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := SizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go deleted file mode 100644 index 3abac418..00000000 --- a/vendor/github.com/golang/protobuf/proto/size_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/test_proto" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. 
- Marshal(messageWithExtension3) - -} - -// non-pointer custom message -type nonptrMessage struct{} - -func (m nonptrMessage) ProtoMessage() {} -func (m nonptrMessage) Reset() {} -func (m nonptrMessage) String() string { return "" } - -func (m nonptrMessage) Marshal() ([]byte, error) { - return []byte{42}, nil -} - -// custom message embedding a proto.Message -type messageWithEmbedding struct { - *pb.OtherMessage -} - -func (m *messageWithEmbedding) ProtoMessage() {} -func (m *messageWithEmbedding) Reset() {} -func (m *messageWithEmbedding) String() string { return "" } - -func (m *messageWithEmbedding) Marshal() ([]byte, error) { - return []byte{42}, nil -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. 
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. 
- {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, - - {"oneof not set", &pb.Oneof{}}, - {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, - {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, - {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, - {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, - {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, - {"oneof 
fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, - {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, - {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, - {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, - {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, - {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, - {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}}, - {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, - {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, - {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, - {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, - {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, - {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, - {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, - {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, - - {"non-pointer message", nonptrMessage{}}, - {"custom message with embedding", &messageWithEmbedding{&pb.OtherMessage{}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index b1679449..00000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2767 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. 
-type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. 
-// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. 
- atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. 
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." 
+ f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - 
field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. 
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return 
sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, 
appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - 
return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func 
sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr 
pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := 
*ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { 
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func 
sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := 
range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func 
appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func 
appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := 
*ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) 
== 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, 
uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} 
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. 
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. -func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. 
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. 
- t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. 
- keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. 
- keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. 
-func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) 
-} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6..00000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. 
- } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. 
- if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
- /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? 
- mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index ebf1caa5..00000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2051 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. 
- u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. 
- } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. 
- type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? 
- var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - } - } - - // Get extension ranges, if any. 
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch 
encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - 
panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 
- *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = 
b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - 
if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, 
error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - 
*f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func 
unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - 
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | 
uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return 
b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) 
- *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. 
- if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. 
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. -func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. 
-func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) <= 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go b/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go deleted file mode 100644 index dc3ef673..00000000 --- a/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go +++ /dev/null @@ -1,5314 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: test_proto/test.proto - -package test_proto // import "github.com/golang/protobuf/proto/test_proto" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} -func (FOO) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{0} -} - -// An enum, for completeness. 
-type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} -func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 0} -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return 
proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} -func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{13, 0} -} - -type DefaultsMessage_DefaultsEnum int32 - -const ( - DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 - DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 - DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 -) - -var DefaultsMessage_DefaultsEnum_name = map[int32]string{ - 0: "ZERO", - 1: "ONE", - 2: "TWO", -} -var DefaultsMessage_DefaultsEnum_value = map[string]int32{ - "ZERO": 0, - "ONE": 1, - "TWO": 2, -} - -func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { - p := new(DefaultsMessage_DefaultsEnum) - *p = x - return p -} -func (x DefaultsMessage_DefaultsEnum) String() string { - return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) -} -func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") - if err != nil { - return err - } - *x = DefaultsMessage_DefaultsEnum(value) - return nil -} -func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{16, 0} -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - return p -} -func (x Defaults_Color) String() string { - return 
proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} -func (Defaults_Color) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{21, 0} -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} -func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{23, 0} -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=test_proto.FOO" json:"foo,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} -func (*GoEnum) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{0} -} -func (m *GoEnum) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoEnum.Unmarshal(m, b) -} -func (m *GoEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoEnum.Marshal(b, m, deterministic) -} -func (dst *GoEnum) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GoEnum.Merge(dst, src) -} -func (m *GoEnum) XXX_Size() int { - return xxx_messageInfo_GoEnum.Size(m) -} -func (m *GoEnum) XXX_DiscardUnknown() { - xxx_messageInfo_GoEnum.DiscardUnknown(m) -} - -var xxx_messageInfo_GoEnum proto.InternalMessageInfo - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} -func (*GoTestField) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{1} -} -func (m *GoTestField) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoTestField.Unmarshal(m, b) -} -func (m *GoTestField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTestField.Marshal(b, m, deterministic) -} -func (dst *GoTestField) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTestField.Merge(dst, src) -} -func (m *GoTestField) XXX_Size() int { - return xxx_messageInfo_GoTestField.Size(m) -} -func (m *GoTestField) XXX_DiscardUnknown() { - xxx_messageInfo_GoTestField.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTestField proto.InternalMessageInfo - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=test_proto.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt,name=Table" 
json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. - RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 
`protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` - F_Sfixed32Required *int32 `protobuf:"fixed32,104,req,name=F_Sfixed32_required,json=FSfixed32Required" json:"F_Sfixed32_required,omitempty"` - F_Sfixed64Required *int64 `protobuf:"fixed64,105,req,name=F_Sfixed64_required,json=FSfixed64Required" json:"F_Sfixed64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" 
json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` - F_Sfixed32Repeated []int32 `protobuf:"fixed32,204,rep,name=F_Sfixed32_repeated,json=FSfixed32Repeated" json:"F_Sfixed32_repeated,omitempty"` - F_Sfixed64Repeated []int64 `protobuf:"fixed64,205,rep,name=F_Sfixed64_repeated,json=FSfixed64Repeated" json:"F_Sfixed64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte 
`protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` - F_Sfixed32Optional *int32 `protobuf:"fixed32,304,opt,name=F_Sfixed32_optional,json=FSfixed32Optional" json:"F_Sfixed32_optional,omitempty"` - F_Sfixed64Optional *int64 `protobuf:"fixed64,305,opt,name=F_Sfixed64_optional,json=FSfixed64Optional" json:"F_Sfixed64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" 
json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - F_Sfixed32Defaulted *int32 `protobuf:"fixed32,404,opt,name=F_Sfixed32_defaulted,json=FSfixed32Defaulted,def=-32" json:"F_Sfixed32_defaulted,omitempty"` - F_Sfixed64Defaulted *int64 `protobuf:"fixed64,405,opt,name=F_Sfixed64_defaulted,json=FSfixed64Defaulted,def=-64" json:"F_Sfixed64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). - F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` - 
F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` - F_Sfixed32RepeatedPacked []int32 `protobuf:"fixed32,504,rep,packed,name=F_Sfixed32_repeated_packed,json=FSfixed32RepeatedPacked" json:"F_Sfixed32_repeated_packed,omitempty"` - F_Sfixed64RepeatedPacked []int64 `protobuf:"fixed64,505,rep,packed,name=F_Sfixed64_repeated_packed,json=FSfixed64RepeatedPacked" json:"F_Sfixed64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} -func (*GoTest) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{2} -} -func (m *GoTest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_GoTest.Unmarshal(m, b) -} -func (m *GoTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTest.Marshal(b, m, deterministic) -} -func (dst *GoTest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTest.Merge(dst, src) -} -func (m *GoTest) XXX_Size() int { - return xxx_messageInfo_GoTest.Size(m) -} -func (m *GoTest) XXX_DiscardUnknown() { - xxx_messageInfo_GoTest.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTest proto.InternalMessageInfo - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 -const Default_GoTest_F_Sfixed32Defaulted int32 = -32 -const Default_GoTest_F_Sfixed64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil 
{ - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_Sfixed32Required() int32 { - if m != nil && m.F_Sfixed32Required != nil { - return *m.F_Sfixed32Required - 
} - return 0 -} - -func (m *GoTest) GetF_Sfixed64Required() int64 { - if m != nil && m.F_Sfixed64Required != nil { - return *m.F_Sfixed64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_Sfixed32Repeated() []int32 { - if m != nil { - return m.F_Sfixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sfixed64Repeated() []int64 { - if m != nil { - return m.F_Sfixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != 
nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m *GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_Sfixed32Optional() int32 { - if m != nil && m.F_Sfixed32Optional != nil { - return *m.F_Sfixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sfixed64Optional() int64 { - if m != nil && m.F_Sfixed64Optional != nil { - return 
*m.F_Sfixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
-} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_Sfixed32Defaulted() int32 { - if m != nil && m.F_Sfixed32Defaulted != nil { - return *m.F_Sfixed32Defaulted - } - return Default_GoTest_F_Sfixed32Defaulted -} - -func (m *GoTest) GetF_Sfixed64Defaulted() int64 { - if m != nil && m.F_Sfixed64Defaulted != nil { - return *m.F_Sfixed64Defaulted - } - return Default_GoTest_F_Sfixed64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return 
nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sfixed32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sfixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sfixed64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sfixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. -type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} -func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 0} -} -func (m *GoTest_RequiredGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoTest_RequiredGroup.Unmarshal(m, b) -} -func (m *GoTest_RequiredGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTest_RequiredGroup.Marshal(b, m, deterministic) -} -func (dst *GoTest_RequiredGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTest_RequiredGroup.Merge(dst, src) -} -func (m *GoTest_RequiredGroup) XXX_Size() int { - return xxx_messageInfo_GoTest_RequiredGroup.Size(m) -} -func (m *GoTest_RequiredGroup) XXX_DiscardUnknown() { - 
xxx_messageInfo_GoTest_RequiredGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTest_RequiredGroup proto.InternalMessageInfo - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} -func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 1} -} -func (m *GoTest_RepeatedGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoTest_RepeatedGroup.Unmarshal(m, b) -} -func (m *GoTest_RepeatedGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTest_RepeatedGroup.Marshal(b, m, deterministic) -} -func (dst *GoTest_RepeatedGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTest_RepeatedGroup.Merge(dst, src) -} -func (m *GoTest_RepeatedGroup) XXX_Size() int { - return xxx_messageInfo_GoTest_RepeatedGroup.Size(m) -} -func (m *GoTest_RepeatedGroup) XXX_DiscardUnknown() { - xxx_messageInfo_GoTest_RepeatedGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTest_RepeatedGroup proto.InternalMessageInfo - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} -func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 2} -} -func (m *GoTest_OptionalGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoTest_OptionalGroup.Unmarshal(m, b) -} -func (m *GoTest_OptionalGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTest_OptionalGroup.Marshal(b, m, deterministic) -} -func (dst *GoTest_OptionalGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTest_OptionalGroup.Merge(dst, src) -} -func (m *GoTest_OptionalGroup) XXX_Size() int { - return xxx_messageInfo_GoTest_OptionalGroup.Size(m) -} -func (m *GoTest_OptionalGroup) XXX_DiscardUnknown() { - xxx_messageInfo_GoTest_OptionalGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTest_OptionalGroup proto.InternalMessageInfo - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing a group containing a required field. 
-type GoTestRequiredGroupField struct { - Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } -func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } -func (*GoTestRequiredGroupField) ProtoMessage() {} -func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{3} -} -func (m *GoTestRequiredGroupField) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoTestRequiredGroupField.Unmarshal(m, b) -} -func (m *GoTestRequiredGroupField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTestRequiredGroupField.Marshal(b, m, deterministic) -} -func (dst *GoTestRequiredGroupField) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTestRequiredGroupField.Merge(dst, src) -} -func (m *GoTestRequiredGroupField) XXX_Size() int { - return xxx_messageInfo_GoTestRequiredGroupField.Size(m) -} -func (m *GoTestRequiredGroupField) XXX_DiscardUnknown() { - xxx_messageInfo_GoTestRequiredGroupField.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTestRequiredGroupField proto.InternalMessageInfo - -func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { - if m != nil { - return m.Group - } - return nil -} - -type GoTestRequiredGroupField_Group struct { - Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } -func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } -func (*GoTestRequiredGroupField_Group) ProtoMessage() {} -func 
(*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{3, 0} -} -func (m *GoTestRequiredGroupField_Group) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoTestRequiredGroupField_Group.Unmarshal(m, b) -} -func (m *GoTestRequiredGroupField_Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoTestRequiredGroupField_Group.Marshal(b, m, deterministic) -} -func (dst *GoTestRequiredGroupField_Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoTestRequiredGroupField_Group.Merge(dst, src) -} -func (m *GoTestRequiredGroupField_Group) XXX_Size() int { - return xxx_messageInfo_GoTestRequiredGroupField_Group.Size(m) -} -func (m *GoTestRequiredGroupField_Group) XXX_DiscardUnknown() { - xxx_messageInfo_GoTestRequiredGroupField_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_GoTestRequiredGroupField_Group proto.InternalMessageInfo - -func (m *GoTestRequiredGroupField_Group) GetField() int32 { - if m != nil && m.Field != nil { - return *m.Field - } - return 0 -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. 
-type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} -func (*GoSkipTest) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{4} -} -func (m *GoSkipTest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoSkipTest.Unmarshal(m, b) -} -func (m *GoSkipTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoSkipTest.Marshal(b, m, deterministic) -} -func (dst *GoSkipTest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoSkipTest.Merge(dst, src) -} -func (m *GoSkipTest) XXX_Size() int { - return xxx_messageInfo_GoSkipTest.Size(m) -} -func (m *GoSkipTest) XXX_DiscardUnknown() { - xxx_messageInfo_GoSkipTest.DiscardUnknown(m) -} - -var xxx_messageInfo_GoSkipTest proto.InternalMessageInfo - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) 
GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} -func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{4, 0} -} -func (m *GoSkipTest_SkipGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GoSkipTest_SkipGroup.Unmarshal(m, b) -} -func (m *GoSkipTest_SkipGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GoSkipTest_SkipGroup.Marshal(b, m, deterministic) -} -func (dst *GoSkipTest_SkipGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoSkipTest_SkipGroup.Merge(dst, src) -} -func (m *GoSkipTest_SkipGroup) XXX_Size() int { - return xxx_messageInfo_GoSkipTest_SkipGroup.Size(m) -} -func (m *GoSkipTest_SkipGroup) XXX_DiscardUnknown() { - xxx_messageInfo_GoSkipTest_SkipGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_GoSkipTest_SkipGroup proto.InternalMessageInfo - -func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. 
-// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} -func (*NonPackedTest) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{5} -} -func (m *NonPackedTest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NonPackedTest.Unmarshal(m, b) -} -func (m *NonPackedTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NonPackedTest.Marshal(b, m, deterministic) -} -func (dst *NonPackedTest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonPackedTest.Merge(dst, src) -} -func (m *NonPackedTest) XXX_Size() int { - return xxx_messageInfo_NonPackedTest.Size(m) -} -func (m *NonPackedTest) XXX_DiscardUnknown() { - xxx_messageInfo_NonPackedTest.DiscardUnknown(m) -} - -var xxx_messageInfo_NonPackedTest proto.InternalMessageInfo - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} -func (*PackedTest) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{6} -} -func (m *PackedTest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PackedTest.Unmarshal(m, b) -} -func (m *PackedTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_PackedTest.Marshal(b, m, deterministic) -} -func (dst *PackedTest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PackedTest.Merge(dst, src) -} -func (m *PackedTest) XXX_Size() int { - return xxx_messageInfo_PackedTest.Size(m) -} -func (m *PackedTest) XXX_DiscardUnknown() { - xxx_messageInfo_PackedTest.DiscardUnknown(m) -} - -var xxx_messageInfo_PackedTest proto.InternalMessageInfo - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} -func (*MaxTag) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{7} -} -func (m *MaxTag) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MaxTag.Unmarshal(m, b) -} -func (m *MaxTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MaxTag.Marshal(b, m, deterministic) -} -func (dst *MaxTag) XXX_Merge(src proto.Message) { - xxx_messageInfo_MaxTag.Merge(dst, src) -} -func (m *MaxTag) XXX_Size() int { - return xxx_messageInfo_MaxTag.Size(m) -} -func (m *MaxTag) XXX_DiscardUnknown() { - xxx_messageInfo_MaxTag.DiscardUnknown(m) -} - -var xxx_messageInfo_MaxTag proto.InternalMessageInfo - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} -func (*OldMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{8} -} -func (m *OldMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OldMessage.Unmarshal(m, b) -} -func (m *OldMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OldMessage.Marshal(b, m, deterministic) -} -func (dst *OldMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_OldMessage.Merge(dst, src) -} -func (m *OldMessage) XXX_Size() int { - return xxx_messageInfo_OldMessage.Size(m) -} -func (m *OldMessage) XXX_DiscardUnknown() { - xxx_messageInfo_OldMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_OldMessage proto.InternalMessageInfo - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} -func (*OldMessage_Nested) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{8, 0} -} -func (m *OldMessage_Nested) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OldMessage_Nested.Unmarshal(m, b) -} -func (m *OldMessage_Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OldMessage_Nested.Marshal(b, m, deterministic) -} -func (dst *OldMessage_Nested) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_OldMessage_Nested.Merge(dst, src) -} -func (m *OldMessage_Nested) XXX_Size() int { - return xxx_messageInfo_OldMessage_Nested.Size(m) -} -func (m *OldMessage_Nested) XXX_DiscardUnknown() { - xxx_messageInfo_OldMessage_Nested.DiscardUnknown(m) -} - -var xxx_messageInfo_OldMessage_Nested proto.InternalMessageInfo - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. - Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} -func (*NewMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{9} -} -func (m *NewMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NewMessage.Unmarshal(m, b) -} -func (m *NewMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NewMessage.Marshal(b, m, deterministic) -} -func (dst *NewMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewMessage.Merge(dst, src) -} -func (m *NewMessage) XXX_Size() int { - return xxx_messageInfo_NewMessage.Size(m) -} -func (m *NewMessage) XXX_DiscardUnknown() { - xxx_messageInfo_NewMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_NewMessage proto.InternalMessageInfo - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - 
return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} -func (*NewMessage_Nested) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{9, 0} -} -func (m *NewMessage_Nested) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NewMessage_Nested.Unmarshal(m, b) -} -func (m *NewMessage_Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NewMessage_Nested.Marshal(b, m, deterministic) -} -func (dst *NewMessage_Nested) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewMessage_Nested.Merge(dst, src) -} -func (m *NewMessage_Nested) XXX_Size() int { - return xxx_messageInfo_NewMessage_Nested.Size(m) -} -func (m *NewMessage_Nested) XXX_DiscardUnknown() { - xxx_messageInfo_NewMessage_Nested.DiscardUnknown(m) -} - -var xxx_messageInfo_NewMessage_Nested proto.InternalMessageInfo - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InnerMessage) 
Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} -func (*InnerMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{10} -} -func (m *InnerMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InnerMessage.Unmarshal(m, b) -} -func (m *InnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InnerMessage.Marshal(b, m, deterministic) -} -func (dst *InnerMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_InnerMessage.Merge(dst, src) -} -func (m *InnerMessage) XXX_Size() int { - return xxx_messageInfo_InnerMessage.Size(m) -} -func (m *InnerMessage) XXX_DiscardUnknown() { - xxx_messageInfo_InnerMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_InnerMessage proto.InternalMessageInfo - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} -func 
(*OtherMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{11} -} - -var extRange_OtherMessage = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OtherMessage -} -func (m *OtherMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OtherMessage.Unmarshal(m, b) -} -func (m *OtherMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OtherMessage.Marshal(b, m, deterministic) -} -func (dst *OtherMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_OtherMessage.Merge(dst, src) -} -func (m *OtherMessage) XXX_Size() int { - return xxx_messageInfo_OtherMessage.Size(m) -} -func (m *OtherMessage) XXX_DiscardUnknown() { - xxx_messageInfo_OtherMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_OtherMessage proto.InternalMessageInfo - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type RequiredInnerMessage struct { - LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } -func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } -func (*RequiredInnerMessage) ProtoMessage() {} -func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, 
[]int{12} -} -func (m *RequiredInnerMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RequiredInnerMessage.Unmarshal(m, b) -} -func (m *RequiredInnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RequiredInnerMessage.Marshal(b, m, deterministic) -} -func (dst *RequiredInnerMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequiredInnerMessage.Merge(dst, src) -} -func (m *RequiredInnerMessage) XXX_Size() int { - return xxx_messageInfo_RequiredInnerMessage.Size(m) -} -func (m *RequiredInnerMessage) XXX_DiscardUnknown() { - xxx_messageInfo_RequiredInnerMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_RequiredInnerMessage proto.InternalMessageInfo - -func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { - if m != nil { - return m.LeoFinallyWonAnOscar - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` - WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=test_proto.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. 
- RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} -func (*MyMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{13} -} - -var extRange_MyMessage = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MyMessage.Unmarshal(m, b) -} -func (m *MyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MyMessage.Marshal(b, m, deterministic) -} -func (dst *MyMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_MyMessage.Merge(dst, src) -} -func (m *MyMessage) XXX_Size() int { - return xxx_messageInfo_MyMessage.Size(m) -} -func (m *MyMessage) XXX_DiscardUnknown() { - xxx_messageInfo_MyMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_MyMessage proto.InternalMessageInfo - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != 
nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { - if m != nil { - return m.WeMustGoDeeper - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} -func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{13, 0} -} -func (m *MyMessage_SomeGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MyMessage_SomeGroup.Unmarshal(m, b) -} -func (m *MyMessage_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MyMessage_SomeGroup.Marshal(b, m, deterministic) -} -func (dst *MyMessage_SomeGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MyMessage_SomeGroup.Merge(dst, src) -} -func (m *MyMessage_SomeGroup) XXX_Size() int { - return xxx_messageInfo_MyMessage_SomeGroup.Size(m) -} -func (m *MyMessage_SomeGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MyMessage_SomeGroup.DiscardUnknown(m) -} - -var 
xxx_messageInfo_MyMessage_SomeGroup proto.InternalMessageInfo - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - MapField map[int32]int32 `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} -func (*Ext) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{14} -} -func (m *Ext) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Ext.Unmarshal(m, b) -} -func (m *Ext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Ext.Marshal(b, m, deterministic) -} -func (dst *Ext) XXX_Merge(src proto.Message) { - xxx_messageInfo_Ext.Merge(dst, src) -} -func (m *Ext) XXX_Size() int { - return xxx_messageInfo_Ext.Size(m) -} -func (m *Ext) XXX_DiscardUnknown() { - xxx_messageInfo_Ext.DiscardUnknown(m) -} - -var xxx_messageInfo_Ext proto.InternalMessageInfo - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -func (m *Ext) GetMapField() map[int32]int32 { - if m != nil { - return m.MapField - } - return nil -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "test_proto.Ext.more", - Tag: "bytes,103,opt,name=more", - Filename: "test_proto/test.proto", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "test_proto.Ext.text", - Tag: "bytes,104,opt,name=text", - Filename: 
"test_proto/test.proto", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "test_proto.Ext.number", - Tag: "varint,105,opt,name=number", - Filename: "test_proto/test.proto", -} - -type ComplexExtension struct { - First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` - Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` - Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } -func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } -func (*ComplexExtension) ProtoMessage() {} -func (*ComplexExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{15} -} -func (m *ComplexExtension) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ComplexExtension.Unmarshal(m, b) -} -func (m *ComplexExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ComplexExtension.Marshal(b, m, deterministic) -} -func (dst *ComplexExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_ComplexExtension.Merge(dst, src) -} -func (m *ComplexExtension) XXX_Size() int { - return xxx_messageInfo_ComplexExtension.Size(m) -} -func (m *ComplexExtension) XXX_DiscardUnknown() { - xxx_messageInfo_ComplexExtension.DiscardUnknown(m) -} - -var xxx_messageInfo_ComplexExtension proto.InternalMessageInfo - -func (m *ComplexExtension) GetFirst() int32 { - if m != nil && m.First != nil { - return *m.First - } - return 0 -} - -func (m *ComplexExtension) GetSecond() int32 { - if m != nil && m.Second != nil { - return *m.Second - } - return 0 -} - -func (m *ComplexExtension) GetThird() []int32 { - if m != nil { - return m.Third - } - return nil -} - -type DefaultsMessage struct { - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} -func (*DefaultsMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{16} -} - -var extRange_DefaultsMessage = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_DefaultsMessage -} -func (m *DefaultsMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DefaultsMessage.Unmarshal(m, b) -} -func (m *DefaultsMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DefaultsMessage.Marshal(b, m, deterministic) -} -func (dst *DefaultsMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefaultsMessage.Merge(dst, src) -} -func (m *DefaultsMessage) XXX_Size() int { - return xxx_messageInfo_DefaultsMessage.Size(m) -} -func (m *DefaultsMessage) XXX_DiscardUnknown() { - xxx_messageInfo_DefaultsMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_DefaultsMessage proto.InternalMessageInfo - -type MyMessageSet struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} -func (*MyMessageSet) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{17} -} - -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) 
error { - return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) -} - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {Start: 100, End: 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MyMessageSet.Unmarshal(m, b) -} -func (m *MyMessageSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MyMessageSet.Marshal(b, m, deterministic) -} -func (dst *MyMessageSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_MyMessageSet.Merge(dst, src) -} -func (m *MyMessageSet) XXX_Size() int { - return xxx_messageInfo_MyMessageSet.Size(m) -} -func (m *MyMessageSet) XXX_DiscardUnknown() { - xxx_messageInfo_MyMessageSet.DiscardUnknown(m) -} - -var xxx_messageInfo_MyMessageSet proto.InternalMessageInfo - -type Empty struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{18} -} -func (m *Empty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Empty.Unmarshal(m, b) -} -func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Empty.Marshal(b, m, deterministic) -} -func (dst *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(dst, src) -} -func (m *Empty) XXX_Size() int { - return xxx_messageInfo_Empty.Size(m) -} -func (m *Empty) XXX_DiscardUnknown() { - xxx_messageInfo_Empty.DiscardUnknown(m) -} - -var xxx_messageInfo_Empty proto.InternalMessageInfo - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} -func (*MessageList) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{19} -} -func (m *MessageList) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageList.Unmarshal(m, b) -} -func (m *MessageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageList.Marshal(b, m, deterministic) -} -func (dst *MessageList) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageList.Merge(dst, src) -} -func (m *MessageList) XXX_Size() int { - return xxx_messageInfo_MessageList.Size(m) -} -func (m *MessageList) XXX_DiscardUnknown() { - xxx_messageInfo_MessageList.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageList proto.InternalMessageInfo - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} -func (*MessageList_Message) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{19, 0} -} -func (m *MessageList_Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageList_Message.Unmarshal(m, b) -} -func (m *MessageList_Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_MessageList_Message.Marshal(b, m, deterministic) -} -func (dst *MessageList_Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageList_Message.Merge(dst, src) -} -func (m *MessageList_Message) XXX_Size() int { - return xxx_messageInfo_MessageList_Message.Size(m) -} -func (m *MessageList_Message) XXX_DiscardUnknown() { - xxx_messageInfo_MessageList_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageList_Message proto.InternalMessageInfo - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} -func (*Strings) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{20} -} -func (m *Strings) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Strings.Unmarshal(m, b) -} -func (m *Strings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Strings.Marshal(b, m, deterministic) -} -func (dst *Strings) XXX_Merge(src proto.Message) { - xxx_messageInfo_Strings.Merge(dst, src) -} -func (m *Strings) XXX_Size() int { - return xxx_messageInfo_Strings.Size(m) -} -func (m *Strings) XXX_DiscardUnknown() { - xxx_messageInfo_Strings.DiscardUnknown(m) -} - -var xxx_messageInfo_Strings proto.InternalMessageInfo - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { 
- return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=test_proto.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. 
- F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. - StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} -func (*Defaults) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{21} -} -func (m *Defaults) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Defaults.Unmarshal(m, b) -} -func (m *Defaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Defaults.Marshal(b, m, deterministic) -} -func (dst *Defaults) XXX_Merge(src proto.Message) { - xxx_messageInfo_Defaults.Merge(dst, src) -} -func (m *Defaults) XXX_Size() int { - return xxx_messageInfo_Defaults.Size(m) -} -func (m *Defaults) XXX_DiscardUnknown() { - xxx_messageInfo_Defaults.DiscardUnknown(m) -} - -var xxx_messageInfo_Defaults proto.InternalMessageInfo - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var 
Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) 
-} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} -func (*SubDefaults) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{22} -} -func (m *SubDefaults) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SubDefaults.Unmarshal(m, b) -} -func (m *SubDefaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SubDefaults.Marshal(b, m, deterministic) -} -func (dst *SubDefaults) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubDefaults.Merge(dst, src) -} -func (m *SubDefaults) XXX_Size() int { - return 
xxx_messageInfo_SubDefaults.Size(m) -} -func (m *SubDefaults) XXX_DiscardUnknown() { - xxx_messageInfo_SubDefaults.DiscardUnknown(m) -} - -var xxx_messageInfo_SubDefaults proto.InternalMessageInfo - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=test_proto.RepeatedEnum_Color" json:"color,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} -func (*RepeatedEnum) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{23} -} -func (m *RepeatedEnum) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RepeatedEnum.Unmarshal(m, b) -} -func (m *RepeatedEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RepeatedEnum.Marshal(b, m, deterministic) -} -func (dst *RepeatedEnum) XXX_Merge(src proto.Message) { - xxx_messageInfo_RepeatedEnum.Merge(dst, src) -} -func (m *RepeatedEnum) XXX_Size() int { - return xxx_messageInfo_RepeatedEnum.Size(m) -} -func (m *RepeatedEnum) XXX_DiscardUnknown() { - xxx_messageInfo_RepeatedEnum.DiscardUnknown(m) -} - -var xxx_messageInfo_RepeatedEnum proto.InternalMessageInfo - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 
`protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} -func (*MoreRepeated) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{24} -} -func (m *MoreRepeated) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MoreRepeated.Unmarshal(m, b) -} -func (m *MoreRepeated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MoreRepeated.Marshal(b, m, deterministic) -} -func (dst *MoreRepeated) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoreRepeated.Merge(dst, src) -} -func (m *MoreRepeated) XXX_Size() int { - return xxx_messageInfo_MoreRepeated.Size(m) -} -func (m *MoreRepeated) XXX_DiscardUnknown() { - xxx_messageInfo_MoreRepeated.DiscardUnknown(m) -} - -var xxx_messageInfo_MoreRepeated proto.InternalMessageInfo - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if 
m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} -func (*GroupOld) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{25} -} -func (m *GroupOld) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupOld.Unmarshal(m, b) -} -func (m *GroupOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupOld.Marshal(b, m, deterministic) -} -func (dst *GroupOld) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupOld.Merge(dst, src) -} -func (m *GroupOld) XXX_Size() int { - return xxx_messageInfo_GroupOld.Size(m) -} -func (m *GroupOld) XXX_DiscardUnknown() { - xxx_messageInfo_GroupOld.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupOld proto.InternalMessageInfo - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} -func (*GroupOld_G) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{25, 0} -} -func (m *GroupOld_G) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupOld_G.Unmarshal(m, b) -} -func (m *GroupOld_G) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_GroupOld_G.Marshal(b, m, deterministic) -} -func (dst *GroupOld_G) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupOld_G.Merge(dst, src) -} -func (m *GroupOld_G) XXX_Size() int { - return xxx_messageInfo_GroupOld_G.Size(m) -} -func (m *GroupOld_G) XXX_DiscardUnknown() { - xxx_messageInfo_GroupOld_G.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupOld_G proto.InternalMessageInfo - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} -func (*GroupNew) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{26} -} -func (m *GroupNew) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupNew.Unmarshal(m, b) -} -func (m *GroupNew) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupNew.Marshal(b, m, deterministic) -} -func (dst *GroupNew) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupNew.Merge(dst, src) -} -func (m *GroupNew) XXX_Size() int { - return xxx_messageInfo_GroupNew.Size(m) -} -func (m *GroupNew) XXX_DiscardUnknown() { - xxx_messageInfo_GroupNew.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupNew proto.InternalMessageInfo - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } 
-func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} -func (*GroupNew_G) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{26, 0} -} -func (m *GroupNew_G) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupNew_G.Unmarshal(m, b) -} -func (m *GroupNew_G) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupNew_G.Marshal(b, m, deterministic) -} -func (dst *GroupNew_G) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupNew_G.Merge(dst, src) -} -func (m *GroupNew_G) XXX_Size() int { - return xxx_messageInfo_GroupNew_G.Size(m) -} -func (m *GroupNew_G) XXX_DiscardUnknown() { - xxx_messageInfo_GroupNew_G.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupNew_G proto.InternalMessageInfo - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} -func (*FloatingPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{27} -} -func (m *FloatingPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FloatingPoint.Unmarshal(m, b) -} -func (m *FloatingPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FloatingPoint.Marshal(b, m, deterministic) -} -func (dst *FloatingPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatingPoint.Merge(dst, src) -} -func 
(m *FloatingPoint) XXX_Size() int { - return xxx_messageInfo_FloatingPoint.Size(m) -} -func (m *FloatingPoint) XXX_DiscardUnknown() { - xxx_messageInfo_FloatingPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_FloatingPoint proto.InternalMessageInfo - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -func (m *FloatingPoint) GetExact() bool { - if m != nil && m.Exact != nil { - return *m.Exact - } - return false -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} -func (*MessageWithMap) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{28} -} -func (m *MessageWithMap) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageWithMap.Unmarshal(m, b) -} -func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic) -} -func (dst 
*MessageWithMap) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageWithMap.Merge(dst, src) -} -func (m *MessageWithMap) XXX_Size() int { - return xxx_messageInfo_MessageWithMap.Size(m) -} -func (m *MessageWithMap) XXX_DiscardUnknown() { - xxx_messageInfo_MessageWithMap.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -type Oneof struct { - // Types that are valid to be assigned to Union: - // *Oneof_F_Bool - // *Oneof_F_Int32 - // *Oneof_F_Int64 - // *Oneof_F_Fixed32 - // *Oneof_F_Fixed64 - // *Oneof_F_Uint32 - // *Oneof_F_Uint64 - // *Oneof_F_Float - // *Oneof_F_Double - // *Oneof_F_String - // *Oneof_F_Bytes - // *Oneof_F_Sint32 - // *Oneof_F_Sint64 - // *Oneof_F_Enum - // *Oneof_F_Message - // *Oneof_FGroup - // *Oneof_F_Largest_Tag - Union isOneof_Union `protobuf_oneof:"union"` - // Types that are valid to be assigned to Tormato: - // *Oneof_Value - Tormato isOneof_Tormato `protobuf_oneof:"tormato"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oneof) Reset() { *m = Oneof{} } -func (m *Oneof) String() string { return proto.CompactTextString(m) } -func (*Oneof) ProtoMessage() {} -func (*Oneof) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{29} -} -func (m *Oneof) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oneof.Unmarshal(m, b) -} -func (m *Oneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
- return xxx_messageInfo_Oneof.Marshal(b, m, deterministic) -} -func (dst *Oneof) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oneof.Merge(dst, src) -} -func (m *Oneof) XXX_Size() int { - return xxx_messageInfo_Oneof.Size(m) -} -func (m *Oneof) XXX_DiscardUnknown() { - xxx_messageInfo_Oneof.DiscardUnknown(m) -} - -var xxx_messageInfo_Oneof proto.InternalMessageInfo - -type isOneof_Union interface { - isOneof_Union() -} - -type Oneof_F_Bool struct { - F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` -} - -type Oneof_F_Int32 struct { - F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` -} - -type Oneof_F_Int64 struct { - F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` -} - -type Oneof_F_Fixed32 struct { - F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` -} - -type Oneof_F_Fixed64 struct { - F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` -} - -type Oneof_F_Uint32 struct { - F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` -} - -type Oneof_F_Uint64 struct { - F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` -} - -type Oneof_F_Float struct { - F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` -} - -type Oneof_F_Double struct { - F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` -} - -type Oneof_F_String struct { - F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` -} - -type Oneof_F_Bytes struct { - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` -} - -type Oneof_F_Sint32 struct { - F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` -} - -type Oneof_F_Sint64 struct { - F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` -} - -type Oneof_F_Enum struct { - F_Enum MyMessage_Color 
`protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=test_proto.MyMessage_Color,oneof"` -} - -type Oneof_F_Message struct { - F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` -} - -type Oneof_FGroup struct { - FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` -} - -type Oneof_F_Largest_Tag struct { - F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` -} - -func (*Oneof_F_Bool) isOneof_Union() {} - -func (*Oneof_F_Int32) isOneof_Union() {} - -func (*Oneof_F_Int64) isOneof_Union() {} - -func (*Oneof_F_Fixed32) isOneof_Union() {} - -func (*Oneof_F_Fixed64) isOneof_Union() {} - -func (*Oneof_F_Uint32) isOneof_Union() {} - -func (*Oneof_F_Uint64) isOneof_Union() {} - -func (*Oneof_F_Float) isOneof_Union() {} - -func (*Oneof_F_Double) isOneof_Union() {} - -func (*Oneof_F_String) isOneof_Union() {} - -func (*Oneof_F_Bytes) isOneof_Union() {} - -func (*Oneof_F_Sint32) isOneof_Union() {} - -func (*Oneof_F_Sint64) isOneof_Union() {} - -func (*Oneof_F_Enum) isOneof_Union() {} - -func (*Oneof_F_Message) isOneof_Union() {} - -func (*Oneof_FGroup) isOneof_Union() {} - -func (*Oneof_F_Largest_Tag) isOneof_Union() {} - -func (m *Oneof) GetUnion() isOneof_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *Oneof) GetF_Bool() bool { - if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { - return x.F_Bool - } - return false -} - -func (m *Oneof) GetF_Int32() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { - return x.F_Int32 - } - return 0 -} - -func (m *Oneof) GetF_Int64() int64 { - if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { - return x.F_Int64 - } - return 0 -} - -func (m *Oneof) GetF_Fixed32() uint32 { - if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { - return x.F_Fixed32 - } - return 0 -} - -func (m *Oneof) GetF_Fixed64() uint64 { - if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { - return x.F_Fixed64 - } - return 0 -} - -func (m *Oneof) 
GetF_Uint32() uint32 { - if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { - return x.F_Uint32 - } - return 0 -} - -func (m *Oneof) GetF_Uint64() uint64 { - if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { - return x.F_Uint64 - } - return 0 -} - -func (m *Oneof) GetF_Float() float32 { - if x, ok := m.GetUnion().(*Oneof_F_Float); ok { - return x.F_Float - } - return 0 -} - -func (m *Oneof) GetF_Double() float64 { - if x, ok := m.GetUnion().(*Oneof_F_Double); ok { - return x.F_Double - } - return 0 -} - -func (m *Oneof) GetF_String() string { - if x, ok := m.GetUnion().(*Oneof_F_String); ok { - return x.F_String - } - return "" -} - -func (m *Oneof) GetF_Bytes() []byte { - if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { - return x.F_Bytes - } - return nil -} - -func (m *Oneof) GetF_Sint32() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { - return x.F_Sint32 - } - return 0 -} - -func (m *Oneof) GetF_Sint64() int64 { - if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { - return x.F_Sint64 - } - return 0 -} - -func (m *Oneof) GetF_Enum() MyMessage_Color { - if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { - return x.F_Enum - } - return MyMessage_RED -} - -func (m *Oneof) GetF_Message() *GoTestField { - if x, ok := m.GetUnion().(*Oneof_F_Message); ok { - return x.F_Message - } - return nil -} - -func (m *Oneof) GetFGroup() *Oneof_F_Group { - if x, ok := m.GetUnion().(*Oneof_FGroup); ok { - return x.FGroup - } - return nil -} - -func (m *Oneof) GetF_Largest_Tag() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { - return x.F_Largest_Tag - } - return 0 -} - -type isOneof_Tormato interface { - isOneof_Tormato() -} - -type Oneof_Value struct { - Value int32 `protobuf:"varint,100,opt,name=value,oneof"` -} - -func (*Oneof_Value) isOneof_Tormato() {} - -func (m *Oneof) GetTormato() isOneof_Tormato { - if m != nil { - return m.Tormato - } - return nil -} - -func (m *Oneof) GetValue() int32 { - if x, ok := m.GetTormato().(*Oneof_Value); ok { - return x.Value - 
} - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ - (*Oneof_F_Bool)(nil), - (*Oneof_F_Int32)(nil), - (*Oneof_F_Int64)(nil), - (*Oneof_F_Fixed32)(nil), - (*Oneof_F_Fixed64)(nil), - (*Oneof_F_Uint32)(nil), - (*Oneof_F_Uint64)(nil), - (*Oneof_F_Float)(nil), - (*Oneof_F_Double)(nil), - (*Oneof_F_String)(nil), - (*Oneof_F_Bytes)(nil), - (*Oneof_F_Sint32)(nil), - (*Oneof_F_Sint64)(nil), - (*Oneof_F_Enum)(nil), - (*Oneof_F_Message)(nil), - (*Oneof_FGroup)(nil), - (*Oneof_F_Largest_Tag)(nil), - (*Oneof_Value)(nil), - } -} - -func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Oneof) - // union - switch x := m.Union.(type) { - case *Oneof_F_Bool: - t := uint64(0) - if x.F_Bool { - t = 1 - } - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Oneof_F_Int32: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Int32)) - case *Oneof_F_Int64: - b.EncodeVarint(3<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Int64)) - case *Oneof_F_Fixed32: - b.EncodeVarint(4<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(x.F_Fixed32)) - case *Oneof_F_Fixed64: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(uint64(x.F_Fixed64)) - case *Oneof_F_Uint32: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Uint32)) - case *Oneof_F_Uint64: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Uint64)) - case *Oneof_F_Float: - b.EncodeVarint(8<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) - case *Oneof_F_Double: - b.EncodeVarint(9<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.F_Double)) - case *Oneof_F_String: - 
b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.F_String) - case *Oneof_F_Bytes: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.F_Bytes) - case *Oneof_F_Sint32: - b.EncodeVarint(12<<3 | proto.WireVarint) - b.EncodeZigzag32(uint64(x.F_Sint32)) - case *Oneof_F_Sint64: - b.EncodeVarint(13<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.F_Sint64)) - case *Oneof_F_Enum: - b.EncodeVarint(14<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Enum)) - case *Oneof_F_Message: - b.EncodeVarint(15<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.F_Message); err != nil { - return err - } - case *Oneof_FGroup: - b.EncodeVarint(16<<3 | proto.WireStartGroup) - if err := b.Marshal(x.FGroup); err != nil { - return err - } - b.EncodeVarint(16<<3 | proto.WireEndGroup) - case *Oneof_F_Largest_Tag: - b.EncodeVarint(536870911<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Largest_Tag)) - case nil: - default: - return fmt.Errorf("Oneof.Union has unexpected type %T", x) - } - // tormato - switch x := m.Tormato.(type) { - case *Oneof_Value: - b.EncodeVarint(100<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Value)) - case nil: - default: - return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) - } - return nil -} - -func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Oneof) - switch tag { - case 1: // union.F_Bool - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Bool{x != 0} - return true, err - case 2: // union.F_Int32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Int32{int32(x)} - return true, err - case 3: // union.F_Int64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Int64{int64(x)} - return true, err - case 4: // union.F_Fixed32 - if 
wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Oneof_F_Fixed32{uint32(x)} - return true, err - case 5: // union.F_Fixed64 - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Oneof_F_Fixed64{x} - return true, err - case 6: // union.F_Uint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Uint32{uint32(x)} - return true, err - case 7: // union.F_Uint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Uint64{x} - return true, err - case 8: // union.F_Float - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} - return true, err - case 9: // union.F_Double - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Oneof_F_Double{math.Float64frombits(x)} - return true, err - case 10: // union.F_String - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Oneof_F_String{x} - return true, err - case 11: // union.F_Bytes - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Oneof_F_Bytes{x} - return true, err - case 12: // union.F_Sint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag32() - m.Union = &Oneof_F_Sint32{int32(x)} - return true, err - case 13: // union.F_Sint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Union = &Oneof_F_Sint64{int64(x)} - return true, err - case 14: // union.F_Enum - if wire != proto.WireVarint 
{ - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Enum{MyMessage_Color(x)} - return true, err - case 15: // union.F_Message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(GoTestField) - err := b.DecodeMessage(msg) - m.Union = &Oneof_F_Message{msg} - return true, err - case 16: // union.f_group - if wire != proto.WireStartGroup { - return true, proto.ErrInternalBadWireType - } - msg := new(Oneof_F_Group) - err := b.DecodeGroup(msg) - m.Union = &Oneof_FGroup{msg} - return true, err - case 536870911: // union.F_Largest_Tag - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Largest_Tag{int32(x)} - return true, err - case 100: // tormato.value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Tormato = &Oneof_Value{int32(x)} - return true, err - default: - return false, nil - } -} - -func _Oneof_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Oneof) - // union - switch x := m.Union.(type) { - case *Oneof_F_Bool: - n += 1 // tag and wire - n += 1 - case *Oneof_F_Int32: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.F_Int32)) - case *Oneof_F_Int64: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.F_Int64)) - case *Oneof_F_Fixed32: - n += 1 // tag and wire - n += 4 - case *Oneof_F_Fixed64: - n += 1 // tag and wire - n += 8 - case *Oneof_F_Uint32: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.F_Uint32)) - case *Oneof_F_Uint64: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.F_Uint64)) - case *Oneof_F_Float: - n += 1 // tag and wire - n += 4 - case *Oneof_F_Double: - n += 1 // tag and wire - n += 8 - case *Oneof_F_String: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.F_String))) - n += len(x.F_String) - case *Oneof_F_Bytes: - n += 1 // tag and wire - n += 
proto.SizeVarint(uint64(len(x.F_Bytes))) - n += len(x.F_Bytes) - case *Oneof_F_Sint32: - n += 1 // tag and wire - n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) - case *Oneof_F_Sint64: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) - case *Oneof_F_Enum: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.F_Enum)) - case *Oneof_F_Message: - s := proto.Size(x.F_Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *Oneof_FGroup: - n += 2 // tag and wire - n += proto.Size(x.FGroup) - n += 2 // tag and wire - case *Oneof_F_Largest_Tag: - n += 5 // tag and wire - n += proto.SizeVarint(uint64(x.F_Largest_Tag)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - // tormato - switch x := m.Tormato.(type) { - case *Oneof_Value: - n += 2 // tag and wire - n += proto.SizeVarint(uint64(x.Value)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Oneof_F_Group struct { - X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } -func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } -func (*Oneof_F_Group) ProtoMessage() {} -func (*Oneof_F_Group) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{29, 0} -} -func (m *Oneof_F_Group) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oneof_F_Group.Unmarshal(m, b) -} -func (m *Oneof_F_Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oneof_F_Group.Marshal(b, m, deterministic) -} -func (dst *Oneof_F_Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oneof_F_Group.Merge(dst, src) -} -func (m 
*Oneof_F_Group) XXX_Size() int { - return xxx_messageInfo_Oneof_F_Group.Size(m) -} -func (m *Oneof_F_Group) XXX_DiscardUnknown() { - xxx_messageInfo_Oneof_F_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_Oneof_F_Group proto.InternalMessageInfo - -func (m *Oneof_F_Group) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` - // This is a oneof, called "union". - // - // Types that are valid to be assigned to Union: - // *Communique_Number - // *Communique_Name - // *Communique_Data - // *Communique_TempC - // *Communique_Col - // *Communique_Msg - Union isCommunique_Union `protobuf_oneof:"union"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Communique) Reset() { *m = Communique{} } -func (m *Communique) String() string { return proto.CompactTextString(m) } -func (*Communique) ProtoMessage() {} -func (*Communique) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{30} -} -func (m *Communique) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Communique.Unmarshal(m, b) -} -func (m *Communique) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Communique.Marshal(b, m, deterministic) -} -func (dst *Communique) XXX_Merge(src proto.Message) { - xxx_messageInfo_Communique.Merge(dst, src) -} -func (m *Communique) XXX_Size() int { - return xxx_messageInfo_Communique.Size(m) -} -func (m *Communique) XXX_DiscardUnknown() { - xxx_messageInfo_Communique.DiscardUnknown(m) -} - -var xxx_messageInfo_Communique proto.InternalMessageInfo - -func (m *Communique) GetMakeMeCry() bool { - if m != nil && m.MakeMeCry != nil { - return *m.MakeMeCry - } - return false -} - -type isCommunique_Union interface { - isCommunique_Union() -} - -type Communique_Number struct { - Number 
int32 `protobuf:"varint,5,opt,name=number,oneof"` -} - -type Communique_Name struct { - Name string `protobuf:"bytes,6,opt,name=name,oneof"` -} - -type Communique_Data struct { - Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` -} - -type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` -} - -type Communique_Col struct { - Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=test_proto.MyMessage_Color,oneof"` -} - -type Communique_Msg struct { - Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` -} - -func (*Communique_Number) isCommunique_Union() {} - -func (*Communique_Name) isCommunique_Union() {} - -func (*Communique_Data) isCommunique_Union() {} - -func (*Communique_TempC) isCommunique_Union() {} - -func (*Communique_Col) isCommunique_Union() {} - -func (*Communique_Msg) isCommunique_Union() {} - -func (m *Communique) GetUnion() isCommunique_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *Communique) GetNumber() int32 { - if x, ok := m.GetUnion().(*Communique_Number); ok { - return x.Number - } - return 0 -} - -func (m *Communique) GetName() string { - if x, ok := m.GetUnion().(*Communique_Name); ok { - return x.Name - } - return "" -} - -func (m *Communique) GetData() []byte { - if x, ok := m.GetUnion().(*Communique_Data); ok { - return x.Data - } - return nil -} - -func (m *Communique) GetTempC() float64 { - if x, ok := m.GetUnion().(*Communique_TempC); ok { - return x.TempC - } - return 0 -} - -func (m *Communique) GetCol() MyMessage_Color { - if x, ok := m.GetUnion().(*Communique_Col); ok { - return x.Col - } - return MyMessage_RED -} - -func (m *Communique) GetMsg() *Strings { - if x, ok := m.GetUnion().(*Communique_Msg); ok { - return x.Msg - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ - (*Communique_Number)(nil), - (*Communique_Name)(nil), - (*Communique_Data)(nil), - (*Communique_TempC)(nil), - (*Communique_Col)(nil), - (*Communique_Msg)(nil), - } -} - -func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - b.EncodeVarint(5<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Number)) - case *Communique_Name: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Name) - case *Communique_Data: - b.EncodeVarint(7<<3 | proto.WireBytes) - b.EncodeRawBytes(x.Data) - case *Communique_TempC: - b.EncodeVarint(8<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.TempC)) - case *Communique_Col: - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Col)) - case *Communique_Msg: - b.EncodeVarint(10<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Msg); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Communique.Union has unexpected type %T", x) - } - return nil -} - -func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Communique) - switch tag { - case 5: // union.number - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Number{int32(x)} - return true, err - case 6: // union.name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Communique_Name{x} - return true, err - case 7: // union.data - if wire != proto.WireBytes { - return true, 
proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Communique_Data{x} - return true, err - case 8: // union.temp_c - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Communique_TempC{math.Float64frombits(x)} - return true, err - case 9: // union.col - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Col{MyMessage_Color(x)} - return true, err - case 10: // union.msg - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Strings) - err := b.DecodeMessage(msg) - m.Union = &Communique_Msg{msg} - return true, err - default: - return false, nil - } -} - -func _Communique_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.Number)) - case *Communique_Name: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Name))) - n += len(x.Name) - case *Communique_Data: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Data))) - n += len(x.Data) - case *Communique_TempC: - n += 1 // tag and wire - n += 8 - case *Communique_Col: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.Col)) - case *Communique_Msg: - s := proto.Size(x.Msg) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type TestUTF8 struct { - Scalar *string `protobuf:"bytes,1,opt,name=scalar" json:"scalar,omitempty"` - Vector []string `protobuf:"bytes,2,rep,name=vector" json:"vector,omitempty"` - // Types that are valid to be assigned to Oneof: - // *TestUTF8_Field - Oneof isTestUTF8_Oneof `protobuf_oneof:"oneof"` - MapKey map[string]int64 `protobuf:"bytes,4,rep,name=map_key,json=mapKey" json:"map_key,omitempty" 
protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - MapValue map[int64]string `protobuf:"bytes,5,rep,name=map_value,json=mapValue" json:"map_value,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestUTF8) Reset() { *m = TestUTF8{} } -func (m *TestUTF8) String() string { return proto.CompactTextString(m) } -func (*TestUTF8) ProtoMessage() {} -func (*TestUTF8) Descriptor() ([]byte, []int) { - return fileDescriptor_test_ee9f66cbbebc227c, []int{31} -} -func (m *TestUTF8) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestUTF8.Unmarshal(m, b) -} -func (m *TestUTF8) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestUTF8.Marshal(b, m, deterministic) -} -func (dst *TestUTF8) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestUTF8.Merge(dst, src) -} -func (m *TestUTF8) XXX_Size() int { - return xxx_messageInfo_TestUTF8.Size(m) -} -func (m *TestUTF8) XXX_DiscardUnknown() { - xxx_messageInfo_TestUTF8.DiscardUnknown(m) -} - -var xxx_messageInfo_TestUTF8 proto.InternalMessageInfo - -func (m *TestUTF8) GetScalar() string { - if m != nil && m.Scalar != nil { - return *m.Scalar - } - return "" -} - -func (m *TestUTF8) GetVector() []string { - if m != nil { - return m.Vector - } - return nil -} - -type isTestUTF8_Oneof interface { - isTestUTF8_Oneof() -} - -type TestUTF8_Field struct { - Field string `protobuf:"bytes,3,opt,name=field,oneof"` -} - -func (*TestUTF8_Field) isTestUTF8_Oneof() {} - -func (m *TestUTF8) GetOneof() isTestUTF8_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *TestUTF8) GetField() string { - if x, ok := m.GetOneof().(*TestUTF8_Field); ok { - return x.Field - } - return "" -} - -func (m *TestUTF8) GetMapKey() map[string]int64 { - if m != nil { - return m.MapKey - } - return nil -} - -func (m 
*TestUTF8) GetMapValue() map[int64]string { - if m != nil { - return m.MapValue - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*TestUTF8) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _TestUTF8_OneofMarshaler, _TestUTF8_OneofUnmarshaler, _TestUTF8_OneofSizer, []interface{}{ - (*TestUTF8_Field)(nil), - } -} - -func _TestUTF8_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*TestUTF8) - // oneof - switch x := m.Oneof.(type) { - case *TestUTF8_Field: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Field) - case nil: - default: - return fmt.Errorf("TestUTF8.Oneof has unexpected type %T", x) - } - return nil -} - -func _TestUTF8_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*TestUTF8) - switch tag { - case 3: // oneof.field - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Oneof = &TestUTF8_Field{x} - return true, err - default: - return false, nil - } -} - -func _TestUTF8_OneofSizer(msg proto.Message) (n int) { - m := msg.(*TestUTF8) - // oneof - switch x := m.Oneof.(type) { - case *TestUTF8_Field: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Field))) - n += len(x.Field) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "test_proto.greeting", - Tag: "bytes,106,rep,name=greeting", - Filename: "test_proto/test.proto", -} - -var E_Complex = &proto.ExtensionDesc{ - ExtendedType: (*OtherMessage)(nil), - ExtensionType: (*ComplexExtension)(nil), - Field: 200, - Name: "test_proto.complex", - Tag: "bytes,200,opt,name=complex", - 
Filename: "test_proto/test.proto", -} - -var E_RComplex = &proto.ExtensionDesc{ - ExtendedType: (*OtherMessage)(nil), - ExtensionType: ([]*ComplexExtension)(nil), - Field: 201, - Name: "test_proto.r_complex", - Tag: "bytes,201,rep,name=r_complex,json=rComplex", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "test_proto.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 102, - Name: "test_proto.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 103, - Name: "test_proto.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 104, - Name: "test_proto.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 105, - Name: "test_proto.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 106, - Name: "test_proto.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", - Filename: 
"test_proto/test.proto", -} - -var E_NoDefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 107, - Name: "test_proto.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 108, - Name: "test_proto.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 109, - Name: "test_proto.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 110, - Name: "test_proto.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 111, - Name: "test_proto.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 112, - Name: "test_proto.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 113, - Name: "test_proto.no_default_bool", - Tag: 
"varint,113,opt,name=no_default_bool,json=noDefaultBool", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 114, - Name: "test_proto.no_default_string", - Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 115, - Name: "test_proto.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", - Filename: "test_proto/test.proto", -} - -var E_NoDefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 116, - Name: "test_proto.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=test_proto.DefaultsMessage_DefaultsEnum", - Filename: "test_proto/test.proto", -} - -var E_DefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 201, - Name: "test_proto.default_double", - Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", - Filename: "test_proto/test.proto", -} - -var E_DefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 202, - Name: "test_proto.default_float", - Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", - Filename: "test_proto/test.proto", -} - -var E_DefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 203, - Name: "test_proto.default_int32", - Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", - Filename: "test_proto/test.proto", -} - -var E_DefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 204, - Name: 
"test_proto.default_int64", - Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", - Filename: "test_proto/test.proto", -} - -var E_DefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 205, - Name: "test_proto.default_uint32", - Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", - Filename: "test_proto/test.proto", -} - -var E_DefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 206, - Name: "test_proto.default_uint64", - Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", - Filename: "test_proto/test.proto", -} - -var E_DefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 207, - Name: "test_proto.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", - Filename: "test_proto/test.proto", -} - -var E_DefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 208, - Name: "test_proto.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", - Filename: "test_proto/test.proto", -} - -var E_DefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 209, - Name: "test_proto.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", - Filename: "test_proto/test.proto", -} - -var E_DefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 210, - Name: "test_proto.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", - Filename: "test_proto/test.proto", -} - -var E_DefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 211, - Name: 
"test_proto.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", - Filename: "test_proto/test.proto", -} - -var E_DefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 212, - Name: "test_proto.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", - Filename: "test_proto/test.proto", -} - -var E_DefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 213, - Name: "test_proto.default_bool", - Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", - Filename: "test_proto/test.proto", -} - -var E_DefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 214, - Name: "test_proto.default_string", - Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string,def=foo", - Filename: "test_proto/test.proto", -} - -var E_DefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 215, - Name: "test_proto.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", - Filename: "test_proto/test.proto", -} - -var E_DefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 216, - Name: "test_proto.default_enum", - Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=test_proto.DefaultsMessage_DefaultsEnum,def=1", - Filename: "test_proto/test.proto", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "test_proto.x201", - Tag: "bytes,201,opt,name=x201", - Filename: "test_proto/test.proto", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: 
"test_proto.x202", - Tag: "bytes,202,opt,name=x202", - Filename: "test_proto/test.proto", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "test_proto.x203", - Tag: "bytes,203,opt,name=x203", - Filename: "test_proto/test.proto", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "test_proto.x204", - Tag: "bytes,204,opt,name=x204", - Filename: "test_proto/test.proto", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "test_proto.x205", - Tag: "bytes,205,opt,name=x205", - Filename: "test_proto/test.proto", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "test_proto.x206", - Tag: "bytes,206,opt,name=x206", - Filename: "test_proto/test.proto", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "test_proto.x207", - Tag: "bytes,207,opt,name=x207", - Filename: "test_proto/test.proto", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "test_proto.x208", - Tag: "bytes,208,opt,name=x208", - Filename: "test_proto/test.proto", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "test_proto.x209", - Tag: "bytes,209,opt,name=x209", - Filename: "test_proto/test.proto", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "test_proto.x210", - Tag: "bytes,210,opt,name=x210", - Filename: "test_proto/test.proto", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, 
- Name: "test_proto.x211", - Tag: "bytes,211,opt,name=x211", - Filename: "test_proto/test.proto", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "test_proto.x212", - Tag: "bytes,212,opt,name=x212", - Filename: "test_proto/test.proto", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "test_proto.x213", - Tag: "bytes,213,opt,name=x213", - Filename: "test_proto/test.proto", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "test_proto.x214", - Tag: "bytes,214,opt,name=x214", - Filename: "test_proto/test.proto", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "test_proto.x215", - Tag: "bytes,215,opt,name=x215", - Filename: "test_proto/test.proto", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "test_proto.x216", - Tag: "bytes,216,opt,name=x216", - Filename: "test_proto/test.proto", -} - -var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "test_proto.x217", - Tag: "bytes,217,opt,name=x217", - Filename: "test_proto/test.proto", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "test_proto.x218", - Tag: "bytes,218,opt,name=x218", - Filename: "test_proto/test.proto", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "test_proto.x219", - Tag: "bytes,219,opt,name=x219", - Filename: "test_proto/test.proto", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - 
Field: 220, - Name: "test_proto.x220", - Tag: "bytes,220,opt,name=x220", - Filename: "test_proto/test.proto", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "test_proto.x221", - Tag: "bytes,221,opt,name=x221", - Filename: "test_proto/test.proto", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "test_proto.x222", - Tag: "bytes,222,opt,name=x222", - Filename: "test_proto/test.proto", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "test_proto.x223", - Tag: "bytes,223,opt,name=x223", - Filename: "test_proto/test.proto", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "test_proto.x224", - Tag: "bytes,224,opt,name=x224", - Filename: "test_proto/test.proto", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "test_proto.x225", - Tag: "bytes,225,opt,name=x225", - Filename: "test_proto/test.proto", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "test_proto.x226", - Tag: "bytes,226,opt,name=x226", - Filename: "test_proto/test.proto", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "test_proto.x227", - Tag: "bytes,227,opt,name=x227", - Filename: "test_proto/test.proto", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "test_proto.x228", - Tag: "bytes,228,opt,name=x228", - Filename: "test_proto/test.proto", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: 
(*Empty)(nil), - Field: 229, - Name: "test_proto.x229", - Tag: "bytes,229,opt,name=x229", - Filename: "test_proto/test.proto", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "test_proto.x230", - Tag: "bytes,230,opt,name=x230", - Filename: "test_proto/test.proto", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "test_proto.x231", - Tag: "bytes,231,opt,name=x231", - Filename: "test_proto/test.proto", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "test_proto.x232", - Tag: "bytes,232,opt,name=x232", - Filename: "test_proto/test.proto", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "test_proto.x233", - Tag: "bytes,233,opt,name=x233", - Filename: "test_proto/test.proto", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "test_proto.x234", - Tag: "bytes,234,opt,name=x234", - Filename: "test_proto/test.proto", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "test_proto.x235", - Tag: "bytes,235,opt,name=x235", - Filename: "test_proto/test.proto", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "test_proto.x236", - Tag: "bytes,236,opt,name=x236", - Filename: "test_proto/test.proto", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "test_proto.x237", - Tag: "bytes,237,opt,name=x237", - Filename: "test_proto/test.proto", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - 
ExtensionType: (*Empty)(nil), - Field: 238, - Name: "test_proto.x238", - Tag: "bytes,238,opt,name=x238", - Filename: "test_proto/test.proto", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "test_proto.x239", - Tag: "bytes,239,opt,name=x239", - Filename: "test_proto/test.proto", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "test_proto.x240", - Tag: "bytes,240,opt,name=x240", - Filename: "test_proto/test.proto", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "test_proto.x241", - Tag: "bytes,241,opt,name=x241", - Filename: "test_proto/test.proto", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "test_proto.x242", - Tag: "bytes,242,opt,name=x242", - Filename: "test_proto/test.proto", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "test_proto.x243", - Tag: "bytes,243,opt,name=x243", - Filename: "test_proto/test.proto", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "test_proto.x244", - Tag: "bytes,244,opt,name=x244", - Filename: "test_proto/test.proto", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "test_proto.x245", - Tag: "bytes,245,opt,name=x245", - Filename: "test_proto/test.proto", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "test_proto.x246", - Tag: "bytes,246,opt,name=x246", - Filename: "test_proto/test.proto", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: 
(*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "test_proto.x247", - Tag: "bytes,247,opt,name=x247", - Filename: "test_proto/test.proto", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "test_proto.x248", - Tag: "bytes,248,opt,name=x248", - Filename: "test_proto/test.proto", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "test_proto.x249", - Tag: "bytes,249,opt,name=x249", - Filename: "test_proto/test.proto", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "test_proto.x250", - Tag: "bytes,250,opt,name=x250", - Filename: "test_proto/test.proto", -} - -func init() { - proto.RegisterType((*GoEnum)(nil), "test_proto.GoEnum") - proto.RegisterType((*GoTestField)(nil), "test_proto.GoTestField") - proto.RegisterType((*GoTest)(nil), "test_proto.GoTest") - proto.RegisterType((*GoTest_RequiredGroup)(nil), "test_proto.GoTest.RequiredGroup") - proto.RegisterType((*GoTest_RepeatedGroup)(nil), "test_proto.GoTest.RepeatedGroup") - proto.RegisterType((*GoTest_OptionalGroup)(nil), "test_proto.GoTest.OptionalGroup") - proto.RegisterType((*GoTestRequiredGroupField)(nil), "test_proto.GoTestRequiredGroupField") - proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "test_proto.GoTestRequiredGroupField.Group") - proto.RegisterType((*GoSkipTest)(nil), "test_proto.GoSkipTest") - proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "test_proto.GoSkipTest.SkipGroup") - proto.RegisterType((*NonPackedTest)(nil), "test_proto.NonPackedTest") - proto.RegisterType((*PackedTest)(nil), "test_proto.PackedTest") - proto.RegisterType((*MaxTag)(nil), "test_proto.MaxTag") - proto.RegisterType((*OldMessage)(nil), "test_proto.OldMessage") - proto.RegisterType((*OldMessage_Nested)(nil), "test_proto.OldMessage.Nested") - 
proto.RegisterType((*NewMessage)(nil), "test_proto.NewMessage") - proto.RegisterType((*NewMessage_Nested)(nil), "test_proto.NewMessage.Nested") - proto.RegisterType((*InnerMessage)(nil), "test_proto.InnerMessage") - proto.RegisterType((*OtherMessage)(nil), "test_proto.OtherMessage") - proto.RegisterType((*RequiredInnerMessage)(nil), "test_proto.RequiredInnerMessage") - proto.RegisterType((*MyMessage)(nil), "test_proto.MyMessage") - proto.RegisterType((*MyMessage_SomeGroup)(nil), "test_proto.MyMessage.SomeGroup") - proto.RegisterType((*Ext)(nil), "test_proto.Ext") - proto.RegisterMapType((map[int32]int32)(nil), "test_proto.Ext.MapFieldEntry") - proto.RegisterType((*ComplexExtension)(nil), "test_proto.ComplexExtension") - proto.RegisterType((*DefaultsMessage)(nil), "test_proto.DefaultsMessage") - proto.RegisterType((*MyMessageSet)(nil), "test_proto.MyMessageSet") - proto.RegisterType((*Empty)(nil), "test_proto.Empty") - proto.RegisterType((*MessageList)(nil), "test_proto.MessageList") - proto.RegisterType((*MessageList_Message)(nil), "test_proto.MessageList.Message") - proto.RegisterType((*Strings)(nil), "test_proto.Strings") - proto.RegisterType((*Defaults)(nil), "test_proto.Defaults") - proto.RegisterType((*SubDefaults)(nil), "test_proto.SubDefaults") - proto.RegisterType((*RepeatedEnum)(nil), "test_proto.RepeatedEnum") - proto.RegisterType((*MoreRepeated)(nil), "test_proto.MoreRepeated") - proto.RegisterType((*GroupOld)(nil), "test_proto.GroupOld") - proto.RegisterType((*GroupOld_G)(nil), "test_proto.GroupOld.G") - proto.RegisterType((*GroupNew)(nil), "test_proto.GroupNew") - proto.RegisterType((*GroupNew_G)(nil), "test_proto.GroupNew.G") - proto.RegisterType((*FloatingPoint)(nil), "test_proto.FloatingPoint") - proto.RegisterType((*MessageWithMap)(nil), "test_proto.MessageWithMap") - proto.RegisterMapType((map[bool][]byte)(nil), "test_proto.MessageWithMap.ByteMappingEntry") - proto.RegisterMapType((map[int64]*FloatingPoint)(nil), 
"test_proto.MessageWithMap.MsgMappingEntry") - proto.RegisterMapType((map[int32]string)(nil), "test_proto.MessageWithMap.NameMappingEntry") - proto.RegisterMapType((map[string]string)(nil), "test_proto.MessageWithMap.StrToStrEntry") - proto.RegisterType((*Oneof)(nil), "test_proto.Oneof") - proto.RegisterType((*Oneof_F_Group)(nil), "test_proto.Oneof.F_Group") - proto.RegisterType((*Communique)(nil), "test_proto.Communique") - proto.RegisterType((*TestUTF8)(nil), "test_proto.TestUTF8") - proto.RegisterMapType((map[string]int64)(nil), "test_proto.TestUTF8.MapKeyEntry") - proto.RegisterMapType((map[int64]string)(nil), "test_proto.TestUTF8.MapValueEntry") - proto.RegisterEnum("test_proto.FOO", FOO_name, FOO_value) - proto.RegisterEnum("test_proto.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("test_proto.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("test_proto.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) - proto.RegisterEnum("test_proto.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("test_proto.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_Complex) - proto.RegisterExtension(E_RComplex) - proto.RegisterExtension(E_NoDefaultDouble) - proto.RegisterExtension(E_NoDefaultFloat) - proto.RegisterExtension(E_NoDefaultInt32) - proto.RegisterExtension(E_NoDefaultInt64) - proto.RegisterExtension(E_NoDefaultUint32) - proto.RegisterExtension(E_NoDefaultUint64) - proto.RegisterExtension(E_NoDefaultSint32) - proto.RegisterExtension(E_NoDefaultSint64) - proto.RegisterExtension(E_NoDefaultFixed32) - proto.RegisterExtension(E_NoDefaultFixed64) - proto.RegisterExtension(E_NoDefaultSfixed32) - 
proto.RegisterExtension(E_NoDefaultSfixed64) - proto.RegisterExtension(E_NoDefaultBool) - proto.RegisterExtension(E_NoDefaultString) - proto.RegisterExtension(E_NoDefaultBytes) - proto.RegisterExtension(E_NoDefaultEnum) - proto.RegisterExtension(E_DefaultDouble) - proto.RegisterExtension(E_DefaultFloat) - proto.RegisterExtension(E_DefaultInt32) - proto.RegisterExtension(E_DefaultInt64) - proto.RegisterExtension(E_DefaultUint32) - proto.RegisterExtension(E_DefaultUint64) - proto.RegisterExtension(E_DefaultSint32) - proto.RegisterExtension(E_DefaultSint64) - proto.RegisterExtension(E_DefaultFixed32) - proto.RegisterExtension(E_DefaultFixed64) - proto.RegisterExtension(E_DefaultSfixed32) - proto.RegisterExtension(E_DefaultSfixed64) - proto.RegisterExtension(E_DefaultBool) - proto.RegisterExtension(E_DefaultString) - proto.RegisterExtension(E_DefaultBytes) - proto.RegisterExtension(E_DefaultEnum) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - 
proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -} - -func init() { proto.RegisterFile("test_proto/test.proto", fileDescriptor_test_ee9f66cbbebc227c) } - -var fileDescriptor_test_ee9f66cbbebc227c = []byte{ - // 4795 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5b, 0xd9, 0x73, 0x1b, 0x47, - 0x7a, 0xd7, 0x0c, 0xee, 0x0f, 0x20, 0x31, 0x6c, 0xc9, 0x12, 0x44, 0x59, 0xd2, 0x08, 0x6b, 0xaf, - 0x61, 0xc9, 0xa2, 0x48, 0x60, 0x08, 0x49, 0x70, 0xec, 0x58, 0x07, 0x41, 0xb3, 0x24, 0x12, 0xf2, - 0x90, 0xb6, 0xb3, 0xca, 0x03, 0x0a, 0x24, 0x06, 0x20, 0x56, 0xc0, 0x0c, 0x0c, 0x0c, 0x56, 0x64, - 0x52, 0xa9, 0xf2, 0x63, 0xaa, 0xf2, 0x94, 0x4d, 0x52, 0x95, 0xf7, 0xbc, 0xe4, 0x25, 0xd7, 0x43, - 0xf2, 0x37, 0xc4, 0xd7, 0x7a, 0x77, 0xbd, 0x57, 0x92, 0x4d, 0x36, 0xf7, 0x9d, 0xcd, 0xbd, 0x47, - 0x5e, 0x9c, 0xea, 0xaf, 0x7b, 0x66, 0x7a, 0x06, 0x50, 0x93, 0x7c, 0xe2, 0x74, 0xf7, 0xef, 0xfb, - 0xf5, 0xf5, 0x9b, 0xef, 0xfb, 0xba, 0x31, 0x84, 0xe7, 0x5c, 0x6b, 0xec, 0x36, 0x87, 0x23, 0xc7, - 0x75, 0x6e, 0xd0, 0xc7, 0x25, 0x7c, 0x24, 0x10, 0x54, 0x17, 0xaf, 0x41, 0x72, 0xdd, 0x59, 0xb3, - 0x27, 0x03, 0x72, 0x05, 0x62, 0x1d, 0xc7, 0x29, 0x28, 0xba, 0x5a, 0x9a, 0x2f, 0xe7, 0x97, 0x02, - 0xcc, 0x52, 0xbd, 0xd1, 0x30, 0x69, 0x5b, 0xf1, 0x26, 0x64, 0xd7, 0x9d, 0x1d, 0x6b, 0xec, 0xd6, - 0x7b, 0x56, 0xbf, 0x4d, 0xce, 0x40, 0xe2, 0x61, 0x6b, 0xd7, 0xea, 0xa3, 0x4d, 0xc6, 0x64, 0x05, - 
0x42, 0x20, 0xbe, 0x73, 0x38, 0xb4, 0x0a, 0x2a, 0x56, 0xe2, 0x73, 0xf1, 0x0f, 0x8b, 0xb4, 0x1b, - 0x6a, 0x49, 0xae, 0x41, 0xfc, 0x41, 0xcf, 0x6e, 0xf3, 0x7e, 0xce, 0x89, 0xfd, 0x30, 0xc4, 0xd2, - 0x83, 0x8d, 0xad, 0xfb, 0x26, 0x82, 0x68, 0x0f, 0x3b, 0xad, 0xdd, 0x3e, 0x25, 0x53, 0x68, 0x0f, - 0x58, 0xa0, 0xb5, 0x8f, 0x5a, 0xa3, 0xd6, 0xa0, 0x10, 0xd3, 0x95, 0x52, 0xc2, 0x64, 0x05, 0xf2, - 0x1a, 0xcc, 0x99, 0xd6, 0x7b, 0x93, 0xde, 0xc8, 0x6a, 0xe3, 0xf0, 0x0a, 0x71, 0x5d, 0x2d, 0x65, - 0x67, 0xf5, 0x80, 0xcd, 0x66, 0x18, 0xcd, 0xcc, 0x87, 0x56, 0xcb, 0xf5, 0xcc, 0x13, 0x7a, 0xec, - 0x08, 0x73, 0x01, 0x4d, 0xcd, 0x1b, 0x43, 0xb7, 0xe7, 0xd8, 0xad, 0x3e, 0x33, 0x4f, 0xea, 0x8a, - 0xd4, 0x3c, 0x84, 0x26, 0x5f, 0x84, 0x7c, 0xbd, 0x79, 0xd7, 0x71, 0xfa, 0xcd, 0x11, 0x1f, 0x55, - 0x01, 0x74, 0xb5, 0x94, 0x36, 0xe7, 0xea, 0xb4, 0xd6, 0x1b, 0x2a, 0x29, 0x81, 0x56, 0x6f, 0x6e, - 0xd8, 0x6e, 0xa5, 0x1c, 0x00, 0xb3, 0xba, 0x5a, 0x4a, 0x98, 0xf3, 0x75, 0xac, 0x9e, 0x42, 0x56, - 0x8d, 0x00, 0x99, 0xd3, 0xd5, 0x52, 0x8c, 0x21, 0xab, 0x86, 0x8f, 0x7c, 0x05, 0x48, 0xbd, 0x59, - 0xef, 0x1d, 0x58, 0x6d, 0x91, 0x75, 0x4e, 0x57, 0x4b, 0x29, 0x53, 0xab, 0xf3, 0x86, 0x19, 0x68, - 0x91, 0x79, 0x5e, 0x57, 0x4b, 0x49, 0x0f, 0x2d, 0x70, 0x5f, 0x85, 0x85, 0x7a, 0xf3, 0xed, 0x5e, - 0x78, 0xc0, 0x79, 0x5d, 0x2d, 0xcd, 0x99, 0xf9, 0x3a, 0xab, 0x9f, 0xc6, 0x8a, 0xc4, 0x9a, 0xae, - 0x96, 0xe2, 0x1c, 0x2b, 0xf0, 0xe2, 0xec, 0xea, 0x7d, 0xa7, 0xe5, 0x06, 0xd0, 0x05, 0x5d, 0x2d, - 0xa9, 0xe6, 0x7c, 0x1d, 0xab, 0xc3, 0xac, 0xf7, 0x9d, 0xc9, 0x6e, 0xdf, 0x0a, 0xa0, 0x44, 0x57, - 0x4b, 0x8a, 0x99, 0xaf, 0xb3, 0xfa, 0x30, 0x76, 0xdb, 0x1d, 0xf5, 0xec, 0x6e, 0x80, 0x3d, 0x8d, - 0x3a, 0xce, 0xd7, 0x59, 0x7d, 0x78, 0x04, 0x77, 0x0f, 0x5d, 0x6b, 0x1c, 0x40, 0x2d, 0x5d, 0x2d, - 0xe5, 0xcc, 0xf9, 0x3a, 0x56, 0x47, 0x58, 0x23, 0x6b, 0xd0, 0xd1, 0xd5, 0xd2, 0x02, 0x65, 0x9d, - 0xb1, 0x06, 0xdb, 0x91, 0x35, 0xe8, 0xea, 0x6a, 0x89, 0x70, 0xac, 0xb0, 0x06, 0x4b, 0x70, 0xba, - 0xde, 0xdc, 0xee, 0x44, 0x37, 0x6e, 
0x5f, 0x57, 0x4b, 0x79, 0x73, 0xa1, 0xee, 0xb5, 0xcc, 0xc2, - 0x8b, 0xec, 0x3d, 0x5d, 0x2d, 0x69, 0x3e, 0x5e, 0xe0, 0x17, 0x35, 0xc9, 0xa4, 0x5e, 0x38, 0xa3, - 0xc7, 0x04, 0x4d, 0xb2, 0xca, 0xb0, 0x26, 0x39, 0xf0, 0x39, 0x3d, 0x26, 0x6a, 0x32, 0x82, 0xc4, - 0xee, 0x39, 0xf2, 0xac, 0x1e, 0x13, 0x35, 0xc9, 0x91, 0x11, 0x4d, 0x72, 0xec, 0x39, 0x3d, 0x16, - 0xd6, 0xe4, 0x14, 0x5a, 0x64, 0x2e, 0xe8, 0xb1, 0xb0, 0x26, 0x39, 0x3a, 0xac, 0x49, 0x0e, 0x3e, - 0xaf, 0xc7, 0x42, 0x9a, 0x8c, 0x62, 0x45, 0xe2, 0x45, 0x3d, 0x16, 0xd2, 0xa4, 0x38, 0x3b, 0x4f, - 0x93, 0x1c, 0x7a, 0x41, 0x8f, 0x89, 0x9a, 0x14, 0x59, 0x7d, 0x4d, 0x72, 0xe8, 0xf3, 0x7a, 0x2c, - 0xa4, 0x49, 0x11, 0xeb, 0x6b, 0x92, 0x63, 0x2f, 0xea, 0xb1, 0x90, 0x26, 0x39, 0xf6, 0x65, 0x51, - 0x93, 0x1c, 0xfa, 0x81, 0xa2, 0xc7, 0x44, 0x51, 0x72, 0xe8, 0xb5, 0x90, 0x28, 0x39, 0xf6, 0x43, - 0x8a, 0x15, 0x55, 0x19, 0x05, 0x8b, 0xab, 0xf0, 0x11, 0x05, 0x8b, 0xb2, 0xe4, 0xe0, 0x1b, 0x11, - 0x59, 0x72, 0xf8, 0xc7, 0x14, 0x1e, 0xd6, 0xe5, 0xb4, 0x81, 0xc8, 0xff, 0x09, 0x35, 0x08, 0x0b, - 0x93, 0x1b, 0x04, 0xc2, 0x74, 0xb8, 0x13, 0x2d, 0x5c, 0xd2, 0x15, 0x5f, 0x98, 0x9e, 0x67, 0x15, - 0x85, 0xe9, 0x03, 0x2f, 0x63, 0xc8, 0xe0, 0xc2, 0x9c, 0x42, 0x56, 0x8d, 0x00, 0xa9, 0xeb, 0x4a, - 0x20, 0x4c, 0x1f, 0x19, 0x12, 0xa6, 0x8f, 0xbd, 0xa2, 0x2b, 0xa2, 0x30, 0x67, 0xa0, 0x45, 0xe6, - 0xa2, 0xae, 0x88, 0xc2, 0xf4, 0xd1, 0xa2, 0x30, 0x7d, 0xf0, 0x17, 0x74, 0x45, 0x10, 0xe6, 0x34, - 0x56, 0x24, 0x7e, 0x41, 0x57, 0x04, 0x61, 0x86, 0x67, 0xc7, 0x84, 0xe9, 0x43, 0x5f, 0xd4, 0x95, - 0x40, 0x98, 0x61, 0x56, 0x2e, 0x4c, 0x1f, 0xfa, 0x45, 0x5d, 0x11, 0x84, 0x19, 0xc6, 0x72, 0x61, - 0xfa, 0xd8, 0x97, 0x30, 0x4e, 0x7b, 0xc2, 0xf4, 0xb1, 0x82, 0x30, 0x7d, 0xe8, 0xef, 0xd0, 0x98, - 0xee, 0x0b, 0xd3, 0x87, 0x8a, 0xc2, 0xf4, 0xb1, 0xbf, 0x4b, 0xb1, 0x81, 0x30, 0xa7, 0xc1, 0xe2, - 0x2a, 0xfc, 0x1e, 0x05, 0x07, 0xc2, 0xf4, 0xc1, 0x61, 0x61, 0xfa, 0xf0, 0xdf, 0xa7, 0x70, 0x51, - 0x98, 0xb3, 0x0c, 0x44, 0xfe, 0x3f, 0xa0, 0x06, 0xa2, 0x30, 0x7d, 0x83, 
0x25, 0x9c, 0x26, 0x15, - 0x66, 0xdb, 0xea, 0xb4, 0x26, 0x7d, 0x2a, 0xe3, 0x12, 0x55, 0x66, 0x2d, 0xee, 0x8e, 0x26, 0x16, - 0x9d, 0xab, 0xe3, 0xf4, 0xef, 0x7b, 0x6d, 0x64, 0x89, 0x0e, 0x9f, 0x09, 0x34, 0x30, 0x78, 0x99, - 0x2a, 0xb4, 0xa6, 0x56, 0xca, 0x66, 0x9e, 0xa9, 0x74, 0x1a, 0x5f, 0x35, 0x04, 0xfc, 0x55, 0xaa, - 0xd3, 0x9a, 0x5a, 0x35, 0x18, 0xbe, 0x6a, 0x04, 0xf8, 0x0a, 0x9d, 0x80, 0x27, 0xd6, 0xc0, 0xe2, - 0x1a, 0x55, 0x6b, 0x2d, 0x56, 0x29, 0x2f, 0x9b, 0x0b, 0x9e, 0x64, 0x67, 0x19, 0x85, 0xba, 0x79, - 0x85, 0x8a, 0xb6, 0x16, 0xab, 0x1a, 0xbe, 0x91, 0xd8, 0x53, 0x99, 0x0a, 0x9d, 0x4b, 0x37, 0xb0, - 0xb9, 0x4e, 0xb5, 0x5b, 0x8b, 0x57, 0xca, 0xcb, 0xcb, 0xa6, 0xc6, 0x15, 0x3c, 0xc3, 0x26, 0xd4, - 0xcf, 0x12, 0xd5, 0x70, 0x2d, 0x5e, 0x35, 0x7c, 0x9b, 0x70, 0x3f, 0x0b, 0x9e, 0x94, 0x03, 0x93, - 0x1b, 0x54, 0xcb, 0xb5, 0x64, 0x65, 0xc5, 0x58, 0x59, 0xbd, 0x6d, 0xe6, 0x99, 0xa6, 0x03, 0x1b, - 0x83, 0xf6, 0xc3, 0x45, 0x1d, 0x18, 0x2d, 0x53, 0x55, 0xd7, 0x92, 0xe5, 0x9b, 0x2b, 0xb7, 0xca, - 0xb7, 0x4c, 0x8d, 0xab, 0x3b, 0xb0, 0x7a, 0x9d, 0x5a, 0x71, 0x79, 0x07, 0x56, 0x2b, 0x54, 0xdf, - 0x35, 0x6d, 0xdf, 0xea, 0xf7, 0x9d, 0x57, 0xf4, 0xe2, 0x53, 0x67, 0xd4, 0x6f, 0x5f, 0x29, 0x82, - 0xa9, 0x71, 0xc5, 0x8b, 0xbd, 0x2e, 0x78, 0x92, 0x0f, 0xcc, 0x7f, 0x95, 0x66, 0xac, 0xb9, 0x5a, - 0xea, 0x6e, 0xaf, 0x6b, 0x3b, 0x63, 0xcb, 0xcc, 0x33, 0xf1, 0x47, 0xd6, 0x64, 0x3b, 0xba, 0x8e, - 0x5f, 0xa5, 0x66, 0x0b, 0xb5, 0xd8, 0xf5, 0x4a, 0x99, 0xf6, 0x34, 0x6b, 0x1d, 0xb7, 0xa3, 0xeb, - 0xf8, 0x6b, 0xd4, 0x86, 0xd4, 0x62, 0xd7, 0xab, 0x06, 0xb7, 0x11, 0xd7, 0xb1, 0x0a, 0x67, 0x84, - 0x77, 0x21, 0xb0, 0xfa, 0x75, 0x6a, 0x95, 0x67, 0x3d, 0x11, 0xff, 0x8d, 0x98, 0x69, 0x17, 0xea, - 0xed, 0x37, 0xa8, 0x9d, 0xc6, 0x7a, 0x23, 0xfe, 0x8b, 0x11, 0xd8, 0xdd, 0x84, 0xb3, 0x91, 0x5c, - 0xa2, 0x39, 0x6c, 0xed, 0x3d, 0xb1, 0xda, 0x85, 0x32, 0x4d, 0x29, 0xee, 0xaa, 0x9a, 0x62, 0x9e, - 0x0e, 0xa5, 0x15, 0x8f, 0xb0, 0x99, 0xdc, 0x86, 0x73, 0xd1, 0xe4, 0xc2, 0xb3, 0xac, 0xd0, 0x1c, - 0x03, 0x2d, 
0xcf, 0x84, 0xf3, 0x8c, 0x88, 0xa9, 0x10, 0x54, 0x3c, 0x53, 0x83, 0x26, 0x1d, 0x81, - 0x69, 0x10, 0x5b, 0xb8, 0xe9, 0x6b, 0x70, 0x7e, 0x3a, 0xfd, 0xf0, 0x8c, 0x57, 0x69, 0x16, 0x82, - 0xc6, 0x67, 0xa3, 0x99, 0xc8, 0x94, 0xf9, 0x8c, 0xbe, 0xab, 0x34, 0x2d, 0x11, 0xcd, 0xa7, 0x7a, - 0x7f, 0x15, 0x0a, 0x53, 0x09, 0x8a, 0x67, 0x7d, 0x93, 0xe6, 0x29, 0x68, 0xfd, 0x5c, 0x24, 0x57, - 0x89, 0x1a, 0xcf, 0xe8, 0xfa, 0x16, 0x4d, 0x5c, 0x04, 0xe3, 0xa9, 0x9e, 0x71, 0xc9, 0xc2, 0x29, - 0x8c, 0x67, 0x7b, 0x9b, 0x66, 0x32, 0x7c, 0xc9, 0x42, 0xd9, 0x8c, 0xd8, 0x6f, 0x24, 0xa7, 0xf1, - 0x6c, 0x6b, 0x34, 0xb5, 0xe1, 0xfd, 0x86, 0xd3, 0x1b, 0x6e, 0xfc, 0x33, 0xd4, 0x78, 0x7b, 0xf6, - 0x8c, 0x7f, 0x14, 0xa3, 0x49, 0x09, 0xb7, 0xde, 0x9e, 0x35, 0x65, 0xdf, 0x7a, 0xc6, 0x94, 0x7f, - 0x4c, 0xad, 0x89, 0x60, 0x3d, 0x35, 0xe7, 0x37, 0x60, 0x71, 0x46, 0xbe, 0xe2, 0xd9, 0xff, 0x84, - 0xda, 0xe7, 0xd1, 0xfe, 0xdc, 0x54, 0xea, 0x32, 0xcd, 0x30, 0x63, 0x04, 0x3f, 0xa5, 0x0c, 0x5a, - 0x88, 0x61, 0x6a, 0x0c, 0x75, 0x98, 0xf3, 0xf2, 0xf1, 0xee, 0xc8, 0x99, 0x0c, 0x0b, 0x75, 0x5d, - 0x2d, 0x41, 0x59, 0x9f, 0x71, 0x3a, 0xf6, 0xd2, 0xf3, 0x75, 0x8a, 0x33, 0xc3, 0x66, 0x8c, 0x87, - 0x31, 0x33, 0x9e, 0x47, 0x7a, 0xec, 0x99, 0x3c, 0x0c, 0xe7, 0xf3, 0x08, 0x66, 0x94, 0xc7, 0x0b, - 0x77, 0x8c, 0xe7, 0xb1, 0xae, 0x3c, 0x83, 0xc7, 0x0b, 0x7e, 0x9c, 0x27, 0x64, 0xb6, 0xb8, 0x1a, - 0x9c, 0xc9, 0xb1, 0x9d, 0xbc, 0x10, 0x3d, 0xa4, 0xaf, 0xe3, 0xe9, 0x2a, 0x5c, 0xc9, 0xcc, 0x84, - 0xe1, 0x4d, 0x9b, 0xbd, 0xf5, 0x0c, 0xb3, 0xd0, 0x68, 0xa6, 0xcd, 0x7e, 0x7e, 0x86, 0x59, 0xf1, - 0x37, 0x15, 0x88, 0x3f, 0xd8, 0xd8, 0xba, 0x4f, 0xd2, 0x10, 0x7f, 0xa7, 0xb1, 0x71, 0x5f, 0x3b, - 0x45, 0x9f, 0xee, 0x36, 0x1a, 0x0f, 0x35, 0x85, 0x64, 0x20, 0x71, 0xf7, 0x4b, 0x3b, 0x6b, 0xdb, - 0x9a, 0x4a, 0xf2, 0x90, 0xad, 0x6f, 0x6c, 0xad, 0xaf, 0x99, 0x8f, 0xcc, 0x8d, 0xad, 0x1d, 0x2d, - 0x46, 0xdb, 0xea, 0x0f, 0x1b, 0x77, 0x76, 0xb4, 0x38, 0x49, 0x41, 0x8c, 0xd6, 0x25, 0x08, 0x40, - 0x72, 0x7b, 0xc7, 0xdc, 0xd8, 0x5a, 0xd7, 0x92, 
0x94, 0x65, 0x67, 0x63, 0x73, 0x4d, 0x4b, 0x51, - 0xe4, 0xce, 0xdb, 0x8f, 0x1e, 0xae, 0x69, 0x69, 0xfa, 0x78, 0xc7, 0x34, 0xef, 0x7c, 0x49, 0xcb, - 0x50, 0xa3, 0xcd, 0x3b, 0x8f, 0x34, 0xc0, 0xe6, 0x3b, 0x77, 0x1f, 0xae, 0x69, 0x59, 0x92, 0x83, - 0x74, 0xfd, 0xed, 0xad, 0x7b, 0x3b, 0x1b, 0x8d, 0x2d, 0x2d, 0x57, 0xfc, 0x45, 0x28, 0xb0, 0x65, - 0x0e, 0xad, 0x22, 0xbb, 0x32, 0x78, 0x03, 0x12, 0x6c, 0x6f, 0x14, 0xd4, 0xca, 0xd5, 0xe9, 0xbd, - 0x99, 0x36, 0x5a, 0x62, 0xbb, 0xc4, 0x0c, 0x17, 0x2f, 0x42, 0x82, 0xad, 0xd3, 0x19, 0x48, 0xb0, - 0xf5, 0x51, 0xf1, 0x2a, 0x81, 0x15, 0x8a, 0xbf, 0xa5, 0x02, 0xac, 0x3b, 0xdb, 0x4f, 0x7a, 0x43, - 0xbc, 0xb8, 0xb9, 0x08, 0x30, 0x7e, 0xd2, 0x1b, 0x36, 0xf1, 0x0d, 0xe4, 0x97, 0x0e, 0x19, 0x5a, - 0x83, 0xbe, 0x97, 0x5c, 0x81, 0x1c, 0x36, 0xf3, 0x57, 0x04, 0xef, 0x1a, 0x52, 0x66, 0x96, 0xd6, - 0x71, 0x27, 0x19, 0x86, 0x54, 0x0d, 0xbc, 0x62, 0x48, 0x0a, 0x90, 0xaa, 0x41, 0x2e, 0x03, 0x16, - 0x9b, 0x63, 0x8c, 0xa6, 0x78, 0xad, 0x90, 0x31, 0xb1, 0x5f, 0x16, 0x5f, 0xc9, 0xeb, 0x80, 0x7d, - 0xb2, 0x99, 0xe7, 0x67, 0xbd, 0x25, 0xde, 0x80, 0x97, 0xe8, 0x03, 0x9b, 0x6f, 0x60, 0xb2, 0xd8, - 0x80, 0x8c, 0x5f, 0x4f, 0x7b, 0xc3, 0x5a, 0x3e, 0x27, 0x0d, 0xe7, 0x04, 0x58, 0xe5, 0x4f, 0x8a, - 0x01, 0xf8, 0x78, 0x16, 0x70, 0x3c, 0xcc, 0x88, 0x0d, 0xa8, 0x78, 0x11, 0xe6, 0xb6, 0x1c, 0x9b, - 0xbd, 0xc7, 0xb8, 0x4e, 0x39, 0x50, 0x5a, 0x05, 0x05, 0xcf, 0xbf, 0x4a, 0xab, 0x78, 0x09, 0x40, - 0x68, 0xd3, 0x40, 0xd9, 0x65, 0x6d, 0xe8, 0x0f, 0x94, 0xdd, 0xe2, 0x35, 0x48, 0x6e, 0xb6, 0x0e, - 0x76, 0x5a, 0x5d, 0x72, 0x05, 0xa0, 0xdf, 0x1a, 0xbb, 0xcd, 0x0e, 0xee, 0xc4, 0xe7, 0x9f, 0x7f, - 0xfe, 0xb9, 0x82, 0xc9, 0x74, 0x86, 0xd6, 0xb2, 0x1d, 0x19, 0x03, 0x34, 0xfa, 0xed, 0x4d, 0x6b, - 0x3c, 0x6e, 0x75, 0x2d, 0xb2, 0x0a, 0x49, 0xdb, 0x1a, 0xd3, 0xe8, 0xab, 0xe0, 0x5d, 0xd3, 0x45, - 0x71, 0x1d, 0x02, 0xdc, 0xd2, 0x16, 0x82, 0x4c, 0x0e, 0x26, 0x1a, 0xc4, 0xec, 0xc9, 0x00, 0x6f, - 0xd4, 0x12, 0x26, 0x7d, 0x5c, 0x7c, 0x1e, 0x92, 0x0c, 0x43, 0x08, 0xc4, 0xed, 0xd6, 
0xc0, 0x2a, - 0xb0, 0x9e, 0xf1, 0xb9, 0xf8, 0x55, 0x05, 0x60, 0xcb, 0x7a, 0x7a, 0xac, 0x5e, 0x03, 0x9c, 0xa4, - 0xd7, 0x18, 0xeb, 0xf5, 0x55, 0x59, 0xaf, 0x54, 0x6d, 0x1d, 0xc7, 0x69, 0x37, 0xd9, 0x46, 0xb3, - 0xeb, 0xbf, 0x0c, 0xad, 0xc1, 0x9d, 0x2b, 0x3e, 0x86, 0xdc, 0x86, 0x6d, 0x5b, 0x23, 0x6f, 0x54, - 0x04, 0xe2, 0xfb, 0xce, 0xd8, 0xe5, 0x37, 0x91, 0xf8, 0x4c, 0x0a, 0x10, 0x1f, 0x3a, 0x23, 0x97, - 0xcd, 0xb4, 0x16, 0x37, 0x96, 0x97, 0x97, 0x4d, 0xac, 0x21, 0xcf, 0x43, 0x66, 0xcf, 0xb1, 0x6d, - 0x6b, 0x8f, 0x4e, 0x23, 0x86, 0x47, 0xc7, 0xa0, 0xa2, 0xf8, 0xcb, 0x0a, 0xe4, 0x1a, 0xee, 0x7e, - 0x40, 0xae, 0x41, 0xec, 0x89, 0x75, 0x88, 0xc3, 0x8b, 0x99, 0xf4, 0x91, 0xbe, 0x30, 0x5f, 0x69, - 0xf5, 0x27, 0xec, 0x5e, 0x32, 0x67, 0xb2, 0x02, 0x39, 0x0b, 0xc9, 0xa7, 0x56, 0xaf, 0xbb, 0xef, - 0x22, 0xa7, 0x6a, 0xf2, 0x12, 0x59, 0x82, 0x44, 0x8f, 0x0e, 0xb6, 0x10, 0xc7, 0x15, 0x2b, 0x88, - 0x2b, 0x26, 0xce, 0xc2, 0x64, 0xb0, 0xab, 0xe9, 0x74, 0x5b, 0x7b, 0xff, 0xfd, 0xf7, 0xdf, 0x57, - 0x8b, 0xfb, 0x70, 0xc6, 0x7b, 0x89, 0x43, 0xd3, 0x7d, 0x04, 0x85, 0xbe, 0xe5, 0x34, 0x3b, 0x3d, - 0xbb, 0xd5, 0xef, 0x1f, 0x36, 0x9f, 0x3a, 0x76, 0xb3, 0x65, 0x37, 0x9d, 0xf1, 0x5e, 0x6b, 0x84, - 0x4b, 0x20, 0xeb, 0xe4, 0x4c, 0xdf, 0x72, 0xea, 0xcc, 0xf0, 0x5d, 0xc7, 0xbe, 0x63, 0x37, 0xa8, - 0x55, 0xf1, 0xb3, 0x38, 0x64, 0x36, 0x0f, 0x3d, 0xfe, 0x33, 0x90, 0xd8, 0x73, 0x26, 0x36, 0x5b, - 0xcf, 0x84, 0xc9, 0x0a, 0xfe, 0x3e, 0xa9, 0xc2, 0x3e, 0x9d, 0x81, 0xc4, 0x7b, 0x13, 0xc7, 0xb5, - 0x70, 0xca, 0x19, 0x93, 0x15, 0xe8, 0x8a, 0x0d, 0x2d, 0xb7, 0x10, 0xc7, 0x6b, 0x0a, 0xfa, 0x18, - 0xac, 0x41, 0xe2, 0x58, 0x6b, 0x40, 0x96, 0x21, 0xe9, 0xd0, 0x3d, 0x18, 0x17, 0x92, 0x78, 0x0f, - 0x1b, 0x32, 0x10, 0x77, 0xc7, 0xe4, 0x38, 0xf2, 0x00, 0x16, 0x9e, 0x5a, 0xcd, 0xc1, 0x64, 0xec, - 0x36, 0xbb, 0x4e, 0xb3, 0x6d, 0x59, 0x43, 0x6b, 0x54, 0x98, 0xc3, 0xde, 0x42, 0x1e, 0x62, 0xd6, - 0x82, 0x9a, 0xf3, 0x4f, 0xad, 0xcd, 0xc9, 0xd8, 0x5d, 0x77, 0xee, 0xa3, 0x1d, 0x59, 0x85, 0xcc, - 0xc8, 0xa2, 0x7e, 0x81, 
0x0e, 0x39, 0x37, 0x3d, 0x82, 0x90, 0x71, 0x7a, 0x64, 0x0d, 0xb1, 0x82, - 0xdc, 0x84, 0xf4, 0x6e, 0xef, 0x89, 0x35, 0xde, 0xb7, 0xda, 0x85, 0x94, 0xae, 0x94, 0xe6, 0xcb, - 0x17, 0x44, 0x2b, 0x7f, 0x81, 0x97, 0xee, 0x39, 0x7d, 0x67, 0x64, 0xfa, 0x60, 0xf2, 0x1a, 0x64, - 0xc6, 0xce, 0xc0, 0x62, 0x6a, 0x4f, 0x63, 0xb0, 0xbd, 0x3c, 0xdb, 0x72, 0xdb, 0x19, 0x58, 0x9e, - 0x57, 0xf3, 0x2c, 0xc8, 0x05, 0x36, 0xdc, 0x5d, 0x7a, 0x98, 0x28, 0x00, 0x5e, 0xf8, 0xd0, 0x41, - 0xe1, 0xe1, 0x82, 0x2c, 0xd2, 0x41, 0x75, 0x3b, 0x34, 0x67, 0x2b, 0x64, 0xf1, 0x2c, 0xef, 0x97, - 0x17, 0x5f, 0x81, 0x8c, 0x4f, 0x18, 0xb8, 0x43, 0xe6, 0x82, 0x32, 0xe8, 0x21, 0x98, 0x3b, 0x64, - 0xfe, 0xe7, 0x45, 0x48, 0xe0, 0xc0, 0x69, 0xe4, 0x32, 0xd7, 0x68, 0xa0, 0xcc, 0x40, 0x62, 0xdd, - 0x5c, 0x5b, 0xdb, 0xd2, 0x14, 0x8c, 0x99, 0x0f, 0xdf, 0x5e, 0xd3, 0x54, 0x41, 0xbf, 0xbf, 0xad, - 0x42, 0x6c, 0xed, 0x00, 0x95, 0xd3, 0x6e, 0xb9, 0x2d, 0xef, 0x0d, 0xa7, 0xcf, 0xa4, 0x06, 0x99, - 0x41, 0xcb, 0xeb, 0x4b, 0xc5, 0x25, 0x0e, 0xf9, 0x92, 0xb5, 0x03, 0x77, 0x69, 0xb3, 0xc5, 0x7a, - 0x5e, 0xb3, 0xdd, 0xd1, 0xa1, 0x99, 0x1e, 0xf0, 0xe2, 0xe2, 0xab, 0x30, 0x17, 0x6a, 0x12, 0x5f, - 0xd1, 0xc4, 0x8c, 0x57, 0x34, 0xc1, 0x5f, 0xd1, 0x9a, 0x7a, 0x4b, 0x29, 0xd7, 0x20, 0x3e, 0x70, - 0x46, 0x16, 0x79, 0x6e, 0xe6, 0x02, 0x17, 0xba, 0x28, 0x99, 0x7c, 0x64, 0x28, 0x26, 0xda, 0x94, - 0x5f, 0x86, 0xb8, 0x6b, 0x1d, 0xb8, 0xcf, 0xb2, 0xdd, 0x67, 0xf3, 0xa3, 0x90, 0xf2, 0x75, 0x48, - 0xda, 0x93, 0xc1, 0xae, 0x35, 0x7a, 0x16, 0xb8, 0x87, 0x03, 0xe3, 0xa0, 0xe2, 0x3b, 0xa0, 0xdd, - 0x73, 0x06, 0xc3, 0xbe, 0x75, 0xb0, 0x76, 0xe0, 0x5a, 0xf6, 0xb8, 0xe7, 0xd8, 0x74, 0x0e, 0x9d, - 0xde, 0x08, 0xdd, 0x1a, 0xce, 0x01, 0x0b, 0xd4, 0xcd, 0x8c, 0xad, 0x3d, 0xc7, 0x6e, 0xf3, 0xa9, - 0xf1, 0x12, 0x45, 0xbb, 0xfb, 0xbd, 0x11, 0xf5, 0x68, 0x34, 0xf8, 0xb0, 0x42, 0x71, 0x1d, 0xf2, - 0xfc, 0x18, 0x36, 0xe6, 0x1d, 0x17, 0xaf, 0x42, 0xce, 0xab, 0xc2, 0x5f, 0x7e, 0xd2, 0x10, 0x7f, - 0xbc, 0x66, 0x36, 0xb4, 0x53, 0x74, 0x5f, 0x1b, 0x5b, 0x6b, 
0x9a, 0x42, 0x1f, 0x76, 0xde, 0x6d, - 0x84, 0xf6, 0xf2, 0x79, 0xc8, 0xf9, 0x63, 0xdf, 0xb6, 0x5c, 0x6c, 0xa1, 0x51, 0x2a, 0x55, 0x53, - 0xd3, 0x4a, 0x31, 0x05, 0x89, 0xb5, 0xc1, 0xd0, 0x3d, 0x2c, 0xfe, 0x12, 0x64, 0x39, 0xe8, 0x61, - 0x6f, 0xec, 0x92, 0xdb, 0x90, 0x1a, 0xf0, 0xf9, 0x2a, 0x98, 0x8b, 0x86, 0x65, 0x1d, 0x20, 0xbd, - 0x67, 0xd3, 0xc3, 0x2f, 0x56, 0x20, 0x25, 0xb8, 0x77, 0xee, 0x79, 0x54, 0xd1, 0xf3, 0x30, 0x1f, - 0x15, 0x13, 0x7c, 0x54, 0x71, 0x13, 0x52, 0x2c, 0x30, 0x8f, 0x31, 0xdd, 0x60, 0xe7, 0x77, 0xa6, - 0x31, 0x26, 0xbe, 0x2c, 0xab, 0x63, 0x39, 0xd4, 0x65, 0xc8, 0xe2, 0x3b, 0xe3, 0xab, 0x90, 0x7a, - 0x73, 0xc0, 0x2a, 0xa6, 0xf8, 0x3f, 0x4a, 0x40, 0xda, 0x5b, 0x2b, 0x72, 0x01, 0x92, 0xec, 0x10, - 0x8b, 0x54, 0xde, 0xa5, 0x4e, 0x02, 0x8f, 0xad, 0xe4, 0x02, 0xa4, 0xf8, 0x41, 0x95, 0x07, 0x1c, - 0xb5, 0x52, 0x36, 0x93, 0xec, 0x60, 0xea, 0x37, 0x56, 0x0d, 0xf4, 0x93, 0xec, 0xba, 0x26, 0xc9, - 0x8e, 0x9e, 0x44, 0x87, 0x8c, 0x7f, 0xd8, 0xc4, 0x10, 0xc1, 0xef, 0x66, 0xd2, 0xde, 0xe9, 0x52, - 0x40, 0x54, 0x0d, 0x74, 0xa0, 0xfc, 0x22, 0x26, 0x5d, 0x0f, 0xf2, 0xa6, 0xb4, 0x77, 0x64, 0xc4, - 0x5f, 0x9e, 0xbc, 0x5b, 0x97, 0x14, 0x3f, 0x24, 0x06, 0x80, 0xaa, 0x81, 0x9e, 0xc9, 0xbb, 0x62, - 0x49, 0xf1, 0x83, 0x20, 0xb9, 0x4c, 0x87, 0x88, 0x07, 0x3b, 0xf4, 0x3f, 0xc1, 0x7d, 0x4a, 0x92, - 0x1d, 0xf7, 0xc8, 0x15, 0xca, 0xc0, 0x4e, 0x6f, 0xe8, 0x1a, 0x82, 0xcb, 0x93, 0x14, 0x3f, 0xd4, - 0x91, 0x6b, 0x14, 0xc2, 0x96, 0xbf, 0x00, 0xcf, 0xb8, 0x29, 0x49, 0xf1, 0x9b, 0x12, 0xa2, 0xd3, - 0x0e, 0xd1, 0x43, 0xa1, 0x57, 0x12, 0x6e, 0x45, 0x92, 0xec, 0x56, 0x84, 0x5c, 0x42, 0x3a, 0x36, - 0xa9, 0x5c, 0x70, 0x03, 0x92, 0xe2, 0xa7, 0xc0, 0xa0, 0x1d, 0x73, 0x49, 0xff, 0xb6, 0x23, 0xc5, - 0xcf, 0x79, 0xe4, 0x16, 0xdd, 0x2f, 0xaa, 0xf0, 0xc2, 0x3c, 0xfa, 0xe2, 0x45, 0x51, 0x7a, 0xde, - 0xae, 0x32, 0x57, 0x5c, 0x63, 0x6e, 0xcc, 0x4c, 0xd4, 0xf1, 0x8d, 0x58, 0xa4, 0x96, 0x8f, 0x7a, - 0x76, 0xa7, 0x90, 0xc7, 0xb5, 0x88, 0xf5, 0xec, 0x8e, 0x99, 0xa8, 0xd3, 0x1a, 0xa6, 0x82, 0x2d, - 
0xda, 0xa6, 0x61, 0x5b, 0xfc, 0x3a, 0x6b, 0xa4, 0x55, 0xa4, 0x00, 0x89, 0x7a, 0x73, 0xab, 0x65, - 0x17, 0x16, 0x98, 0x9d, 0xdd, 0xb2, 0xcd, 0x78, 0x7d, 0xab, 0x65, 0x93, 0x97, 0x21, 0x36, 0x9e, - 0xec, 0x16, 0xc8, 0xf4, 0xcf, 0x82, 0xdb, 0x93, 0x5d, 0x6f, 0x30, 0x26, 0xc5, 0x90, 0x0b, 0x90, - 0x1e, 0xbb, 0xa3, 0xe6, 0x2f, 0x58, 0x23, 0xa7, 0x70, 0x1a, 0x97, 0xf1, 0x94, 0x99, 0x1a, 0xbb, - 0xa3, 0xc7, 0xd6, 0xc8, 0x39, 0xa6, 0x0f, 0x2e, 0x5e, 0x82, 0xac, 0xc0, 0x4b, 0xf2, 0xa0, 0xd8, - 0x2c, 0x81, 0xa9, 0x29, 0x37, 0x4d, 0xc5, 0x2e, 0xbe, 0x03, 0x39, 0xef, 0x88, 0x85, 0x33, 0x36, - 0xe8, 0xdb, 0xd4, 0x77, 0x46, 0xf8, 0x96, 0xce, 0x97, 0x2f, 0x85, 0x23, 0x66, 0x00, 0xe4, 0x91, - 0x8b, 0x81, 0x8b, 0x5a, 0x64, 0x30, 0x4a, 0xf1, 0x07, 0x0a, 0xe4, 0x36, 0x9d, 0x51, 0xf0, 0xfb, - 0xc5, 0x19, 0x48, 0xec, 0x3a, 0x4e, 0x7f, 0x8c, 0xc4, 0x69, 0x93, 0x15, 0xc8, 0x8b, 0x90, 0xc3, - 0x07, 0xef, 0x90, 0xac, 0xfa, 0xb7, 0x40, 0x59, 0xac, 0xe7, 0xe7, 0x62, 0x02, 0xf1, 0x9e, 0xed, - 0x8e, 0xb9, 0x47, 0xc3, 0x67, 0xf2, 0x05, 0xc8, 0xd2, 0xbf, 0x9e, 0x65, 0xdc, 0xcf, 0xa6, 0x81, - 0x56, 0x73, 0xc3, 0x97, 0x60, 0x0e, 0x35, 0xe0, 0xc3, 0x52, 0xfe, 0x8d, 0x4f, 0x8e, 0x35, 0x70, - 0x60, 0x01, 0x52, 0xcc, 0x21, 0x8c, 0xf1, 0x07, 0xdf, 0x8c, 0xe9, 0x15, 0xa9, 0x9b, 0xc5, 0x83, - 0x0a, 0xcb, 0x40, 0x52, 0x26, 0x2f, 0x15, 0xef, 0x41, 0x1a, 0xc3, 0x65, 0xa3, 0xdf, 0x26, 0x2f, - 0x80, 0xd2, 0x2d, 0x58, 0x18, 0xae, 0xcf, 0x86, 0x4e, 0x21, 0x1c, 0xb0, 0xb4, 0x6e, 0x2a, 0xdd, - 0xc5, 0x05, 0x50, 0xd6, 0xe9, 0xb1, 0xe0, 0x80, 0x3b, 0x6c, 0xe5, 0xa0, 0xf8, 0x16, 0x27, 0xd9, - 0xb2, 0x9e, 0xca, 0x49, 0xb6, 0xac, 0xa7, 0x8c, 0xe4, 0xf2, 0x14, 0x09, 0x2d, 0x1d, 0xf2, 0xdf, - 0xc0, 0x95, 0xc3, 0x62, 0x05, 0xe6, 0xf0, 0x45, 0xed, 0xd9, 0xdd, 0x47, 0x4e, 0xcf, 0xc6, 0x83, - 0x48, 0x07, 0x13, 0x38, 0xc5, 0x54, 0x3a, 0x74, 0x1f, 0xac, 0x83, 0xd6, 0x1e, 0x4b, 0x87, 0xd3, - 0x26, 0x2b, 0x14, 0xbf, 0x1f, 0x87, 0x79, 0xee, 0x64, 0xdf, 0xed, 0xb9, 0xfb, 0x9b, 0xad, 0x21, - 0xd9, 0x82, 0x1c, 0xf5, 0xaf, 0xcd, 
0x41, 0x6b, 0x38, 0xa4, 0x2f, 0xb2, 0x82, 0xa1, 0xf9, 0xda, - 0x0c, 0xb7, 0xcd, 0x2d, 0x96, 0xb6, 0x5a, 0x03, 0x6b, 0x93, 0xa1, 0x59, 0xa0, 0xce, 0xda, 0x41, - 0x0d, 0x79, 0x00, 0xd9, 0xc1, 0xb8, 0xeb, 0xd3, 0xb1, 0x48, 0x7f, 0x55, 0x42, 0xb7, 0x39, 0xee, - 0x86, 0xd8, 0x60, 0xe0, 0x57, 0xd0, 0xc1, 0x51, 0xef, 0xec, 0xb3, 0xc5, 0x8e, 0x1c, 0x1c, 0x75, - 0x25, 0xe1, 0xc1, 0xed, 0x06, 0x35, 0xa4, 0x0e, 0x40, 0x5f, 0x35, 0xd7, 0xa1, 0x27, 0x3c, 0xd4, - 0x52, 0xb6, 0x5c, 0x92, 0xb0, 0x6d, 0xbb, 0xa3, 0x1d, 0x67, 0xdb, 0x1d, 0xf1, 0x84, 0x64, 0xcc, - 0x8b, 0x8b, 0xaf, 0x83, 0x16, 0x5d, 0x85, 0xa3, 0x72, 0x92, 0x8c, 0x90, 0x93, 0x2c, 0xfe, 0x1c, - 0xe4, 0x23, 0xd3, 0x16, 0xcd, 0x09, 0x33, 0xbf, 0x21, 0x9a, 0x67, 0xcb, 0xe7, 0x43, 0xdf, 0x68, - 0x88, 0x5b, 0x2f, 0x32, 0xbf, 0x0e, 0x5a, 0x74, 0x09, 0x44, 0xea, 0xb4, 0xe4, 0x40, 0x83, 0xf6, - 0xaf, 0xc2, 0x5c, 0x68, 0xd2, 0xa2, 0x71, 0xe6, 0x88, 0x69, 0x15, 0x7f, 0x25, 0x01, 0x89, 0x86, - 0x6d, 0x39, 0x1d, 0x72, 0x2e, 0x1c, 0x3b, 0xdf, 0x3c, 0xe5, 0xc5, 0xcd, 0xf3, 0x91, 0xb8, 0xf9, - 0xe6, 0x29, 0x3f, 0x6a, 0x9e, 0x8f, 0x44, 0x4d, 0xaf, 0xa9, 0x6a, 0x90, 0x8b, 0x53, 0x31, 0xf3, - 0xcd, 0x53, 0x42, 0xc0, 0xbc, 0x38, 0x15, 0x30, 0x83, 0xe6, 0xaa, 0x41, 0x1d, 0x6c, 0x38, 0x5a, - 0xbe, 0x79, 0x2a, 0x88, 0x94, 0x17, 0xa2, 0x91, 0xd2, 0x6f, 0xac, 0x1a, 0x6c, 0x48, 0x42, 0x94, - 0xc4, 0x21, 0xb1, 0xf8, 0x78, 0x21, 0x1a, 0x1f, 0xd1, 0x8e, 0x47, 0xc6, 0x0b, 0xd1, 0xc8, 0x88, - 0x8d, 0x3c, 0x12, 0x9e, 0x8f, 0x44, 0x42, 0x24, 0x65, 0x21, 0xf0, 0x42, 0x34, 0x04, 0x32, 0x3b, - 0x61, 0xa4, 0x62, 0xfc, 0xf3, 0x1b, 0xab, 0x06, 0x31, 0x22, 0xc1, 0x4f, 0x76, 0x10, 0xc1, 0xdd, - 0xc0, 0x30, 0x50, 0xa5, 0x0b, 0xe7, 0x25, 0xa8, 0x79, 0xe9, 0x27, 0x2c, 0xb8, 0xa2, 0x5e, 0x82, - 0x66, 0x40, 0xaa, 0xc3, 0xcf, 0xea, 0x1a, 0x7a, 0xb2, 0x90, 0x38, 0x51, 0x02, 0x4b, 0xf5, 0x26, - 0x7a, 0x34, 0x3a, 0xbb, 0x0e, 0x3b, 0x70, 0x94, 0x60, 0xae, 0xde, 0x7c, 0xd8, 0x1a, 0x75, 0x29, - 0x74, 0xa7, 0xd5, 0xf5, 0x6f, 0x3d, 0xa8, 0x0a, 0xb2, 0x75, 0xde, 0xb2, 
0xd3, 0xea, 0x92, 0xb3, - 0x9e, 0xc4, 0xda, 0xd8, 0xaa, 0x70, 0x91, 0x2d, 0x9e, 0xa3, 0x4b, 0xc7, 0xc8, 0xd0, 0x37, 0x2e, - 0x70, 0xdf, 0x78, 0x37, 0x05, 0x89, 0x89, 0xdd, 0x73, 0xec, 0xbb, 0x19, 0x48, 0xb9, 0xce, 0x68, - 0xd0, 0x72, 0x9d, 0xe2, 0x0f, 0x15, 0x80, 0x7b, 0xce, 0x60, 0x30, 0xb1, 0x7b, 0xef, 0x4d, 0x2c, - 0x72, 0x09, 0xb2, 0x83, 0xd6, 0x13, 0xab, 0x39, 0xb0, 0x9a, 0x7b, 0x23, 0xef, 0x6d, 0xc8, 0xd0, - 0xaa, 0x4d, 0xeb, 0xde, 0xe8, 0x90, 0x14, 0xbc, 0x04, 0x1e, 0x15, 0x84, 0xc2, 0xe4, 0x09, 0xfd, - 0x19, 0x9e, 0x8e, 0x26, 0xf9, 0x4e, 0x7a, 0x09, 0x29, 0x3b, 0xe4, 0xa4, 0xf8, 0x1e, 0xb2, 0x63, - 0xce, 0x39, 0x48, 0xba, 0xd6, 0x60, 0xd8, 0xdc, 0x43, 0xc1, 0x50, 0x51, 0x24, 0x68, 0xf9, 0x1e, - 0xb9, 0x01, 0xb1, 0x3d, 0xa7, 0x8f, 0x52, 0x39, 0x72, 0x77, 0x28, 0x92, 0xbc, 0x04, 0xb1, 0xc1, - 0x98, 0xc9, 0x27, 0x5b, 0x3e, 0x1d, 0xca, 0x20, 0x58, 0xc8, 0xa2, 0xc0, 0xc1, 0xb8, 0xeb, 0xcf, - 0xbd, 0xf8, 0xa9, 0x0a, 0x69, 0xba, 0x5f, 0x6f, 0xef, 0xd4, 0x6f, 0xe1, 0xb1, 0x61, 0xaf, 0xd5, - 0xc7, 0x1b, 0x02, 0xfa, 0x9a, 0xf2, 0x12, 0xad, 0xff, 0x8a, 0xb5, 0xe7, 0x3a, 0x23, 0x74, 0xcd, - 0x19, 0x93, 0x97, 0xe8, 0x92, 0xb3, 0xac, 0x38, 0xc6, 0x67, 0xc9, 0x8a, 0x98, 0xd1, 0xb7, 0x86, - 0x4d, 0xea, 0x03, 0x98, 0xbf, 0x0c, 0x9d, 0xae, 0xbd, 0xee, 0xe8, 0xd1, 0xed, 0x81, 0x75, 0xc8, - 0xfc, 0x64, 0x72, 0x80, 0x05, 0xf2, 0xb3, 0xec, 0xc8, 0xc7, 0x76, 0x92, 0x7d, 0x5f, 0x55, 0x7c, - 0x96, 0xf1, 0x3b, 0x14, 0x14, 0x9c, 0xfb, 0xb0, 0xb8, 0x78, 0x1b, 0xb2, 0x02, 0xef, 0x51, 0xae, - 0x28, 0x16, 0xf1, 0x63, 0x21, 0xd6, 0xa3, 0x6e, 0x75, 0x44, 0x3f, 0x46, 0x57, 0xd4, 0xa1, 0x1a, - 0xbe, 0x9a, 0x87, 0x58, 0xbd, 0xd1, 0xa0, 0x79, 0x56, 0xbd, 0xd1, 0x58, 0xd1, 0x94, 0xda, 0x0a, - 0xa4, 0xbb, 0x23, 0xcb, 0xa2, 0xae, 0xf7, 0x59, 0xe7, 0xbc, 0x2f, 0xe3, 0xb2, 0xfa, 0xb0, 0xda, - 0x5b, 0x90, 0xda, 0x63, 0x27, 0x3d, 0xf2, 0xcc, 0x5b, 0x8d, 0xc2, 0x1f, 0xb3, 0xdb, 0xb5, 0xe7, - 0x45, 0x40, 0xf4, 0x7c, 0x68, 0x7a, 0x3c, 0xb5, 0x1d, 0xc8, 0x8c, 0x9a, 0x47, 0x93, 0x7e, 0xc0, - 0x62, 0xb9, 
0x9c, 0x34, 0x3d, 0xe2, 0x55, 0xb5, 0x75, 0x58, 0xb0, 0x1d, 0xef, 0x47, 0xbe, 0x66, - 0x9b, 0x7b, 0xb2, 0x59, 0x49, 0xb4, 0xd7, 0x81, 0xc5, 0x3e, 0x15, 0xb0, 0x1d, 0xde, 0xc0, 0xbc, - 0x5f, 0x6d, 0x0d, 0x34, 0x81, 0xa8, 0xc3, 0xdc, 0xa5, 0x8c, 0xa7, 0xc3, 0xbe, 0x4e, 0xf0, 0x79, - 0xd0, 0xc3, 0x46, 0x68, 0xb8, 0x0f, 0x94, 0xd1, 0x74, 0xd9, 0xc7, 0x1e, 0x3e, 0x0d, 0x86, 0x95, - 0x69, 0x1a, 0x1a, 0x11, 0x64, 0x34, 0xfb, 0xec, 0x4b, 0x10, 0x91, 0xa6, 0x6a, 0x44, 0x56, 0x67, - 0x72, 0x8c, 0xe1, 0xf4, 0xd8, 0xa7, 0x1c, 0x3e, 0x0f, 0x0b, 0x38, 0x33, 0x88, 0x8e, 0x1a, 0xd0, - 0x97, 0xd9, 0x77, 0x1e, 0x21, 0xa2, 0xa9, 0x11, 0x8d, 0x8f, 0x31, 0xa2, 0x27, 0xec, 0xb3, 0x0a, - 0x9f, 0x68, 0x7b, 0xd6, 0x88, 0xc6, 0xc7, 0x18, 0x51, 0x9f, 0x7d, 0x72, 0x11, 0x22, 0xaa, 0x1a, - 0xb5, 0x0d, 0x20, 0xe2, 0xc6, 0xf3, 0xe8, 0x2c, 0x65, 0x1a, 0xb0, 0x4f, 0x69, 0x82, 0xad, 0x67, - 0x46, 0xb3, 0xa8, 0x8e, 0x1a, 0x94, 0xcd, 0xbe, 0xb3, 0x09, 0x53, 0x55, 0x8d, 0xda, 0x03, 0x38, - 0x2d, 0x4e, 0xef, 0x58, 0xc3, 0x72, 0xd8, 0x47, 0x22, 0xc1, 0x04, 0xb9, 0xd5, 0x4c, 0xb2, 0xa3, - 0x06, 0x36, 0x64, 0x1f, 0x90, 0x44, 0xc8, 0xaa, 0x46, 0xed, 0x1e, 0xe4, 0x05, 0xb2, 0x5d, 0xbc, - 0x57, 0x90, 0x11, 0xbd, 0xc7, 0x3e, 0x7b, 0xf2, 0x89, 0x68, 0x46, 0x15, 0xdd, 0x3d, 0x96, 0x63, - 0x48, 0x69, 0x46, 0xec, 0xab, 0x9d, 0x60, 0x3c, 0x68, 0x13, 0x79, 0x51, 0x76, 0x59, 0x42, 0x22, - 0xe3, 0x19, 0xb3, 0x2f, 0x7a, 0x82, 0xe1, 0x50, 0x93, 0xda, 0x20, 0x34, 0x29, 0x8b, 0xa6, 0x19, - 0x52, 0x16, 0x17, 0x23, 0x62, 0x49, 0x02, 0x59, 0x12, 0xaf, 0xaf, 0x84, 0xe9, 0xd3, 0x62, 0xed, - 0x01, 0xcc, 0x9f, 0xc4, 0x65, 0x7d, 0xa0, 0xb0, 0xbb, 0x8c, 0xca, 0xd2, 0x8a, 0xb1, 0xb2, 0x6a, - 0xce, 0xb5, 0x43, 0x9e, 0x6b, 0x1d, 0xe6, 0x4e, 0xe0, 0xb6, 0x3e, 0x54, 0xd8, 0x8d, 0x00, 0xe5, - 0x32, 0x73, 0xed, 0xb0, 0xef, 0x9a, 0x3b, 0x81, 0xe3, 0xfa, 0x48, 0x61, 0x57, 0x48, 0x46, 0xd9, - 0xa7, 0xf1, 0x7c, 0xd7, 0xdc, 0x09, 0x1c, 0xd7, 0xc7, 0xec, 0xc4, 0xaf, 0x1a, 0x15, 0x91, 0x06, - 0x3d, 0xc5, 0xfc, 0x49, 0x1c, 0xd7, 0x27, 0x0a, 
0x5e, 0x29, 0xa9, 0x86, 0xe1, 0xaf, 0x8f, 0xef, - 0xbb, 0xe6, 0x4f, 0xe2, 0xb8, 0xbe, 0xa6, 0xe0, 0xd5, 0x93, 0x6a, 0xac, 0x86, 0x88, 0xc2, 0x23, - 0x3a, 0x8e, 0xe3, 0xfa, 0x54, 0xc1, 0xfb, 0x20, 0xd5, 0xa8, 0xfa, 0x44, 0xdb, 0x53, 0x23, 0x3a, - 0x8e, 0xe3, 0xfa, 0x3a, 0x9e, 0xaf, 0x6a, 0xaa, 0x71, 0x33, 0x44, 0x84, 0xbe, 0x2b, 0x7f, 0x22, - 0xc7, 0xf5, 0x0d, 0x05, 0xaf, 0xee, 0x54, 0xe3, 0x96, 0xe9, 0x8d, 0x20, 0xf0, 0x5d, 0xf9, 0x13, - 0x39, 0xae, 0x6f, 0x2a, 0x78, 0xc7, 0xa7, 0x1a, 0xb7, 0xc3, 0x54, 0xe8, 0xbb, 0xb4, 0x93, 0x39, - 0xae, 0xcf, 0x14, 0xfc, 0xa2, 0x47, 0x5d, 0x5d, 0x36, 0xbd, 0x41, 0x08, 0xbe, 0x4b, 0x3b, 0x99, - 0xe3, 0xfa, 0x96, 0x82, 0x9f, 0xf9, 0xa8, 0xab, 0x2b, 0x11, 0xb2, 0xaa, 0x51, 0x5b, 0x83, 0xdc, - 0xf1, 0x1d, 0xd7, 0xb7, 0xc5, 0x1b, 0xd4, 0x6c, 0x5b, 0xf0, 0x5e, 0x8f, 0x85, 0xfd, 0x3b, 0x86, - 0xeb, 0xfa, 0x0e, 0x26, 0x7f, 0xb5, 0xe7, 0xde, 0x64, 0xf7, 0x8c, 0xcc, 0xe4, 0x95, 0xb6, 0xd5, - 0x79, 0xad, 0xe3, 0x38, 0xc1, 0x96, 0x32, 0x87, 0xd6, 0x08, 0xde, 0x9e, 0x63, 0x78, 0xb3, 0xef, - 0x2a, 0x78, 0x2d, 0x99, 0xe3, 0xd4, 0x68, 0xe1, 0xbf, 0x47, 0xcc, 0xb5, 0xd9, 0xc1, 0x9c, 0x8f, - 0xf6, 0x6b, 0xdf, 0x53, 0x4e, 0xe6, 0xd8, 0x6a, 0xb1, 0xc6, 0xd6, 0x9a, 0xbf, 0x38, 0x58, 0xf3, - 0x06, 0xc4, 0x0f, 0xca, 0xcb, 0x2b, 0xe1, 0x14, 0x4f, 0xbc, 0x95, 0x67, 0xee, 0x2c, 0x5b, 0x5e, - 0x08, 0xfd, 0x7c, 0x31, 0x18, 0xba, 0x87, 0x26, 0x5a, 0x72, 0x86, 0xb2, 0x84, 0xe1, 0x43, 0x29, - 0x43, 0x99, 0x33, 0x54, 0x24, 0x0c, 0x1f, 0x49, 0x19, 0x2a, 0x9c, 0xc1, 0x90, 0x30, 0x7c, 0x2c, - 0x65, 0x30, 0x38, 0xc3, 0xaa, 0x84, 0xe1, 0x13, 0x29, 0xc3, 0x2a, 0x67, 0xa8, 0x4a, 0x18, 0xbe, - 0x26, 0x65, 0xa8, 0x72, 0x86, 0x9b, 0x12, 0x86, 0x4f, 0xa5, 0x0c, 0x37, 0x39, 0xc3, 0x2d, 0x09, - 0xc3, 0xd7, 0xa5, 0x0c, 0xb7, 0x38, 0xc3, 0x6d, 0x09, 0xc3, 0x37, 0xa4, 0x0c, 0xb7, 0x19, 0xc3, - 0xca, 0xb2, 0x84, 0xe1, 0x9b, 0x32, 0x86, 0x95, 0x65, 0xce, 0x20, 0xd3, 0xe4, 0x67, 0x52, 0x06, - 0xae, 0xc9, 0x15, 0x99, 0x26, 0xbf, 0x25, 0x65, 0xe0, 0x9a, 0x5c, 0x91, 0x69, 0xf2, 
0xdb, 0x52, - 0x06, 0xae, 0xc9, 0x15, 0x99, 0x26, 0xbf, 0x23, 0x65, 0xe0, 0x9a, 0x5c, 0x91, 0x69, 0xf2, 0xbb, - 0x52, 0x06, 0xae, 0xc9, 0x15, 0x99, 0x26, 0xbf, 0x27, 0x65, 0xe0, 0x9a, 0x5c, 0x91, 0x69, 0xf2, - 0x4f, 0xa4, 0x0c, 0x5c, 0x93, 0x2b, 0x32, 0x4d, 0xfe, 0xa9, 0x94, 0x81, 0x6b, 0x72, 0x45, 0xa6, - 0xc9, 0x3f, 0x93, 0x32, 0x70, 0x4d, 0x96, 0x65, 0x9a, 0xfc, 0xbe, 0x8c, 0xa1, 0xcc, 0x35, 0x59, - 0x96, 0x69, 0xf2, 0xcf, 0xa5, 0x0c, 0x5c, 0x93, 0x65, 0x99, 0x26, 0xff, 0x42, 0xca, 0xc0, 0x35, - 0x59, 0x96, 0x69, 0xf2, 0x07, 0x52, 0x06, 0xae, 0xc9, 0xb2, 0x4c, 0x93, 0x7f, 0x29, 0x65, 0xe0, - 0x9a, 0x2c, 0xcb, 0x34, 0xf9, 0x57, 0x52, 0x06, 0xae, 0xc9, 0xb2, 0x4c, 0x93, 0x7f, 0x2d, 0x65, - 0xe0, 0x9a, 0x2c, 0xcb, 0x34, 0xf9, 0x37, 0x52, 0x06, 0xae, 0xc9, 0xb2, 0x4c, 0x93, 0x7f, 0x2b, - 0x65, 0xe0, 0x9a, 0x2c, 0xcb, 0x34, 0xf9, 0x77, 0x52, 0x06, 0xae, 0xc9, 0x8a, 0x4c, 0x93, 0x7f, - 0x2f, 0x63, 0xa8, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x07, 0x29, 0x03, 0xd7, 0x64, 0x45, 0xa6, - 0xc9, 0x7f, 0x94, 0x32, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x27, 0x29, 0x03, 0xd7, 0x64, 0x45, - 0xa6, 0xc9, 0x7f, 0x96, 0x32, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x17, 0x29, 0x03, 0xd7, 0x64, - 0x45, 0xa6, 0xc9, 0x7f, 0x95, 0x32, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x37, 0x29, 0x03, 0xd7, - 0x64, 0x45, 0xa6, 0xc9, 0x1f, 0x4a, 0x19, 0xb8, 0x26, 0x2b, 0x32, 0x4d, 0xfe, 0xbb, 0x94, 0x81, - 0x6b, 0xd2, 0x90, 0x69, 0xf2, 0x3f, 0x64, 0x0c, 0x06, 0xd7, 0xa4, 0x21, 0xd3, 0xe4, 0x7f, 0x4a, - 0x19, 0xb8, 0x26, 0x0d, 0x99, 0x26, 0xff, 0x4b, 0xca, 0xc0, 0x35, 0x69, 0xc8, 0x34, 0xf9, 0xdf, - 0x52, 0x06, 0xae, 0x49, 0x43, 0xa6, 0xc9, 0xff, 0x91, 0x32, 0x70, 0x4d, 0x1a, 0x32, 0x4d, 0xfe, - 0xaf, 0x94, 0x81, 0x6b, 0xd2, 0x90, 0x69, 0xf2, 0x47, 0x52, 0x06, 0xae, 0x49, 0x43, 0xa6, 0xc9, - 0x1f, 0x4b, 0x19, 0xb8, 0x26, 0x0d, 0x99, 0x26, 0x7f, 0x22, 0x65, 0xe0, 0x9a, 0x34, 0x64, 0x9a, - 0xfc, 0xa9, 0x94, 0x81, 0x6b, 0x72, 0x55, 0xa6, 0xc9, 0xff, 0x93, 0x31, 0xac, 0x2e, 0xdf, 0xbd, - 0xfe, 0xf8, 0x5a, 0xb7, 
0xe7, 0xee, 0x4f, 0x76, 0x97, 0xf6, 0x9c, 0xc1, 0x8d, 0xae, 0xd3, 0x6f, - 0xd9, 0xdd, 0x1b, 0x08, 0xdb, 0x9d, 0x74, 0x6e, 0x04, 0xff, 0xcc, 0xce, 0x4c, 0xff, 0x3f, 0x00, - 0x00, 0xff, 0xff, 0x8e, 0xb4, 0x0c, 0xbd, 0xe4, 0x3e, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/proto/test_proto/test.proto b/vendor/github.com/golang/protobuf/proto/test_proto/test.proto deleted file mode 100644 index f339e05c..00000000 --- a/vendor/github.com/golang/protobuf/proto/test_proto/test.proto +++ /dev/null @@ -1,570 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -option go_package = "github.com/golang/protobuf/proto/test_proto"; - -package test_proto; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. 
- required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - required sfixed32 F_Sfixed32_required = 104; - required sfixed64 F_Sfixed64_required = 105; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - repeated sfixed32 F_Sfixed32_repeated = 204; - repeated sfixed64 F_Sfixed64_repeated = 205; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 
F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - optional sfixed32 F_Sfixed32_optional = 304; - optional sfixed64 F_Sfixed64_optional = 305; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - optional int32 F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - optional sfixed32 F_Sfixed32_defaulted = 404 [default = -32]; - optional sfixed64 F_Sfixed64_defaulted = 405 [default = -64]; - - // Packed repeated fields (no string or bytes). 
- repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - repeated sfixed32 F_Sfixed32_repeated_packed = 504 [packed=true]; - repeated sfixed64 F_Sfixed64_repeated_packed = 505 [packed=true]; - - // Required, repeated, and optional groups. - required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing a group containing a required field. -message GoTestRequiredGroupField { - required group Group = 1 { - required int32 Field = 2; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. 
-message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. - -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; - - extensions 100 to max; -} - -message RequiredInnerMessage { - required InnerMessage leo_finally_won_an_oscar = 1; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - optional RequiredInnerMessage we_must_go_deeper = 13; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. 
- repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; - map map_field = 2; -} - -extend MyMessage { - repeated string greeting = 106; - // leave field 200 unregistered for testing -} - -message ComplexExtension { - optional int32 first = 1; - optional int32 second = 2; - repeated int32 third = 3; -} - -extend OtherMessage { - optional ComplexExtension complex = 200; - repeated ComplexExtension r_complex = 201; -} - -message DefaultsMessage { - enum DefaultsEnum { - ZERO = 0; - ONE = 1; - TWO = 2; - }; - extensions 100 to max; -} - -extend DefaultsMessage { - optional double no_default_double = 101; - optional float no_default_float = 102; - optional int32 no_default_int32 = 103; - optional int64 no_default_int64 = 104; - optional uint32 no_default_uint32 = 105; - optional uint64 no_default_uint64 = 106; - optional sint32 no_default_sint32 = 107; - optional sint64 no_default_sint64 = 108; - optional fixed32 no_default_fixed32 = 109; - optional fixed64 no_default_fixed64 = 110; - optional sfixed32 no_default_sfixed32 = 111; - optional sfixed64 no_default_sfixed64 = 112; - optional bool no_default_bool = 113; - optional string no_default_string = 114; - optional bytes no_default_bytes = 115; - optional DefaultsMessage.DefaultsEnum no_default_enum = 116; - - optional double default_double = 201 [default = 3.1415]; - optional float default_float = 202 [default = 3.14]; - optional int32 default_int32 = 203 [default = 42]; - optional int64 default_int64 = 204 [default = 43]; - optional uint32 default_uint32 = 205 [default = 44]; - optional uint64 default_uint64 = 206 [default = 45]; - optional sint32 default_sint32 = 207 [default = 46]; - optional sint64 default_sint64 = 208 [default = 47]; - optional fixed32 default_fixed32 = 209 [default = 48]; - optional fixed64 
default_fixed64 = 210 [default = 49]; - optional sfixed32 default_sfixed32 = 211 [default = 50]; - optional sfixed64 default_sfixed64 = 212 [default = 51]; - optional bool default_bool = 213 [default = true]; - optional string default_string = 214 [default = "Hello, string,def=foo"]; - optional bytes default_bytes = 215 [default = "Hello, bytes"]; - optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - optional Empty x220 = 220; - optional Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - 
optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. - optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. - optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. 
- -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; - optional bool exact = 2; -} - -message MessageWithMap { - map name_mapping = 1; - map msg_mapping = 2; - map byte_mapping = 3; - map str_to_str = 4; -} - -message Oneof { - oneof union { - bool F_Bool = 1; - int32 F_Int32 = 2; - int64 F_Int64 = 3; - fixed32 F_Fixed32 = 4; - fixed64 F_Fixed64 = 5; - uint32 F_Uint32 = 6; - uint64 F_Uint64 = 7; - float F_Float = 8; - double F_Double = 9; - string F_String = 10; - bytes F_Bytes = 11; - sint32 F_Sint32 = 12; - sint64 F_Sint64 = 13; - MyMessage.Color F_Enum = 14; - GoTestField F_Message = 15; - group F_Group = 16 { - optional int32 x = 17; - } - int32 F_Largest_Tag = 536870911; - } - - oneof tormato { - int32 value = 100; - } -} - -message Communique { - optional bool make_me_cry = 1; - - // This is a oneof, called "union". - oneof union { - int32 number = 5; - string name = 6; - bytes data = 7; - double temp_c = 8; - MyMessage.Color col = 9; - Strings msg = 10; - } -} - -message TestUTF8 { - optional string scalar = 1; - repeated string vector = 2; - oneof oneof { string field = 3; } - map map_key = 4; - map map_value = 5; -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725..00000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' 
|| ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return 
err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. 
- if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. 
- if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. 
-func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = 
w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 00000000..47eb3e44 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := 
m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. 
+ v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := 
protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. 
+ var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". 
+ v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. 
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && 
p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. 
+ + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return 
"", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new file mode 100644 index 00000000..a31134ee --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. 
+func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) + w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil 
{ + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + 
w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
+ } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af..00000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) 
bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid 
Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... 
> - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. 
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go deleted file mode 100644 index a8198087..00000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,706 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "fmt" - "math" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - . "github.com/golang/protobuf/proto/test_proto" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation with double quotes - { - in: `count:42 name: "My name 
is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenation with single quotes - { - in: "count:42 name: 'My name is '\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string concatenations with mixed quotes - { - in: "count:42 name: 'My name is '\n\"elsewhere\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - { - in: "count:42 name: \"My name is \"\n'elsewhere'", - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. - { - in: "count:42 name: '\303\277\302\201\x00\xAB\xCD\xEF'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\x00\xAB\xCD\xEF"), - }, - }, - - // Quoted string with unicode escapes. 
- { - in: `count: 42 name: "\u0047\U00000047\uffff\U0010ffff"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("GG\uffff\U0010ffff"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Bad \u escape - { - in: `count: 42 name: "\u000"`, - err: `line 1.16: invalid quoted string "\u000": \u requires 4 following digits`, - }, - - // Bad \U escape - { - in: `count: 42 name: "\U0000000"`, - err: `line 1.16: invalid quoted string "\U0000000": \U requires 8 following digits`, - }, - - // Bad \U escape - { - in: `count: 42 name: "\xxx"`, - err: `line 1.16: invalid quoted string "\xxx": \xxx contains non-hexadecimal digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number 
posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // unclosed bracket doesn't cause infinite loop - { - in: `[`, - err: `line 1.0: unclosed type_url or extension name`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated field with list notation - { - in: `count:42 pet: ["horsey", "bunny"]`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: fmt.Sprintf(`proto: required field "%T.count" not set`, MyMessage{}), - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Missing required field in a required submessage - { - in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, - err: fmt.Sprintf(`proto: required field "%T.host" not set`, InnerMessage{}), - out: &MyMessage{ - 
Count: Int32(42), - WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: "Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - Name: String("Ezekiel"), - }, - }, - - // Boolean false - { - in: `count:42 inner { host: "example.com" connected: false }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean true - { - in: `count:42 inner { host: "example.com" connected: true }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean 0 - { - in: `count:42 inner { host: "example.com" connected: 0 }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean 1 - { - in: `count:42 inner { host: "example.com" connected: 1 }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - // Boolean f - { - in: `count:42 inner { host: "example.com" connected: f }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean t - { - in: `count:42 inner { host: "example.com" connected: t }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - 
}, - }, - // Boolean False - { - in: `count:42 inner { host: "example.com" connected: False }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(false), - }, - }, - }, - // Boolean True - { - in: `count:42 inner { host: "example.com" connected: True }`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("example.com"), - Connected: Bool(true), - }, - }, - }, - - // Extension - buildExtStructTest(`count: 42 [test_proto.Ext.more]:`), - buildExtStructTest(`count: 42 [test_proto.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [test_proto.Ext.text]:"Hello, world!" [test_proto.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [test_proto.greeting]:"bula" [test_proto.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. 
- if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !Equal(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !Equal(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. -func TestRepeatedEnum(t *testing.T) { - pb := new(RepeatedEnum) - if err := UnmarshalText("color: RED", pb); err != nil { - t.Fatal(err) - } - exp := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - if !Equal(pb, exp) { - t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) - } -} - -func TestProto3TextParsing(t *testing.T) { - m := new(proto3pb.Message) - const in = `name: "Wallace" true_scotsman: true` - want := &proto3pb.Message{ - Name: "Wallace", - TrueScotsman: true, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestMapParsing(t *testing.T) { - m := new(MessageWithMap) - const in = `name_mapping: name_mapping:` + - `msg_mapping:,>` + // separating commas are okay - `msg_mapping>` + // no colon after "value" - `msg_mapping:>` + // omitted key - `msg_mapping:` + // omitted value - `byte_mapping:` + - `byte_mapping:<>` // omitted key and value - want := 
&MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Beatles", - 1234: "Feist", - }, - MsgMapping: map[int64]*FloatingPoint{ - -4: {F: Float64(2.0)}, - -2: {F: Float64(4.0)}, - 0: {F: Float64(5.0)}, - 1: nil, - }, - ByteMapping: map[bool][]byte{ - false: nil, - true: []byte("so be it"), - }, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestOneofParsing(t *testing.T) { - const in = `name:"Shrek"` - m := new(Communique) - want := &Communique{Union: &Communique_Name{"Shrek"}} - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } - - const inOverwrite = `name:"Shrek" number:42` - m = new(Communique) - testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'" - if err := UnmarshalText(inOverwrite, m); err == nil { - t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr) - } else if err.Error() != testErr { - t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v", - err.Error(), testErr) - } - -} - -var benchInput string - -func init() { - benchInput = "count: 4\n" - for i := 0; i < 1000; i++ { - benchInput += "pet: \"fido\"\n" - } - - // Check it is valid input. - pb := new(MyMessage) - err := UnmarshalText(benchInput, pb) - if err != nil { - panic("Bad benchmark input: " + err.Error()) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - pb := new(MyMessage) - for i := 0; i < b.N; i++ { - UnmarshalText(benchInput, pb) - } - b.SetBytes(int64(len(benchInput))) -} diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go deleted file mode 100644 index 3c8b033c..00000000 --- a/vendor/github.com/golang/protobuf/proto/text_test.go +++ /dev/null @@ -1,518 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "sync" - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/test_proto" - anypb "github.com/golang/protobuf/ptypes/any" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. 
-type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[test_proto.Ext.more]: < - data: "Big gobs for big rats" -> -[test_proto.greeting]: "adg" -[test_proto.greeting]: "easy" -[test_proto.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. 
- m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func TestTextOneof(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&pb.Communique{}, ``}, - // scalar field - {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`}, - // message field - {&pb.Communique{Union: &pb.Communique_Msg{ - &pb.Strings{StringField: proto.String("why hello!")}, - }}, `msg:`}, - // bad oneof (should not panic) - {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`}, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c - j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != 
compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. - &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. 
-type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. - if err != outOfSpace { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) - } - s := buf.b.String() - x := text[:buf.limit] - if s != x { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) - } - } -} - -func TestFloats(t *testing.T) { - tests := []struct { - f float64 - want string - }{ - {0, "0"}, - {4.7, "4.7"}, - {math.Inf(1), "inf"}, - {math.Inf(-1), "-inf"}, - {math.NaN(), "nan"}, - } - for _, test := range tests { - msg := &pb.FloatingPoint{F: &test.f} - got := strings.TrimSpace(msg.String()) - want := `f:` + test.want - if got != want { - t.Errorf("f=%f: got %q, want %q", test.f, got, want) - } - } -} - -func TestRepeatedNilText(t *testing.T) { - m := &pb.MessageList{ - Message: []*pb.MessageList_Message{ - nil, - &pb.MessageList_Message{ - Name: proto.String("Horse"), - }, - nil, - }, - } - want := `Message -Message { - name: "Horse" -} -Message -` - if s := proto.MarshalTextString(m); s != want { - t.Errorf(" got: %s\nwant: %s", s, want) - } -} - -func TestProto3Text(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&proto3pb.Message{}, ``}, - // zero message except for an empty byte slice - {&proto3pb.Message{Data: []byte{}}, ``}, - // trivial case - {&proto3pb.Message{Name: "Rob", 
HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, - // empty map - {&pb.MessageWithMap{}, ``}, - // non-empty map; map format is the same as a repeated struct, - // and they are sorted by key (numerically for numeric keys). - { - &pb.MessageWithMap{NameMapping: map[int32]string{ - -1: "Negatory", - 7: "Lucky", - 1234: "Feist", - 6345789: "Otis", - }}, - `name_mapping: ` + - `name_mapping: ` + - `name_mapping: ` + - `name_mapping:`, - }, - // map with nil value; not well-defined, but we shouldn't crash - { - &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, - `msg_mapping:`, - }, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} - -func TestRacyMarshal(t *testing.T) { - // This test should be run with the race detector. - - any := &pb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} - proto.SetExtension(any, pb.E_Ext_Text, proto.String("bar")) - b, err := proto.Marshal(any) - if err != nil { - panic(err) - } - m := &proto3pb.Message{ - Name: "David", - ResultCount: 47, - Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any), Value: b}, - } - - wantText := proto.MarshalTextString(m) - wantBytes, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal error: %v", err) - } - - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(20) - for i := 0; i < 10; i++ { - go func() { - defer wg.Done() - got := proto.MarshalTextString(m) - if got != wantText { - t.Errorf("proto.MarshalTextString = %q, want %q", got, wantText) - } - }() - go func() { - defer wg.Done() - got, err := proto.Marshal(m) - if !bytes.Equal(got, wantBytes) || err != nil { - t.Errorf("proto.Marshal = (%x, %v), want (%x, nil)", got, err, wantBytes) - } - }() - } -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 00000000..d7c28da5 --- /dev/null 
+++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. 
+func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 00000000..398e3485 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. 
+func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index e855b1f5..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2812 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto - -package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. 
- FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 
17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. 
-type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. 
- FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. 
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. 
-type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} -} -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(dst, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. 
- // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} -} -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(dst, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m 
*FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} -} -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (dst *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(dst, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) 
GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} -} -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - 
xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. -type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} -} -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} 
-func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} 
-func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? 
- DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} -} -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() 
string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} -} -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. 
Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} -} -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - 
return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. -type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} 
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. -type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} -} -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var 
xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. -type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} -} -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m 
*ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} -} -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. 
- JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. 
- JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. 
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. 
- SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (dst *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(dst, src) -} -func (m *FileOptions) XXX_Size() int { - 
return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != 
nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". 
- NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (dst *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(dst, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - 
return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. 
The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. 
That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (dst *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(dst, 
src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (dst *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(dst, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (dst *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(dst, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) 
GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(dst, src) -} -func (m *EnumValueOptions) XXX_Size() 
int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (dst *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(dst, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (dst *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(dst, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var 
xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} -} -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(dst, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return 
m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} -} -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. 
-type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. 
Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} -} -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(dst, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. 
For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. 
For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. 
- LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} -} -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() []int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } 
- return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} -} -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() 
[]*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} -} -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) -} -func (m 
*GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), 
"google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", 
MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) -} - -func init() { - proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) -} - -var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ - // 2555 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, - 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, - 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, - 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, - 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, - 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, - 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, - 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, - 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, - 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, - 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, - 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, - 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, - 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, - 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, - 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, - 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, - 0x7d, 0x32, 0xa2, 0x5a, 
0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 0x99, - 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, - 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, - 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, - 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, - 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, - 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, - 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, - 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, - 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, - 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, - 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, - 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, - 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, - 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 0xc9, 0xc5, 0x73, 0xbc, - 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, - 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, - 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, - 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, - 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, - 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 
0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, - 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, - 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, - 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, - 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, - 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, - 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, - 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, - 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, - 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, - 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, - 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, - 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, - 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, - 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, - 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, - 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, - 0x92, 0xf0, 0xfe, 0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, - 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, - 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, - 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, - 
0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, - 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, - 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, - 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, - 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, - 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, - 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, - 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, - 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, - 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, - 0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, - 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, - 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, - 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, - 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, - 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, - 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, - 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, - 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, - 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, - 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 
0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, - 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, - 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, - 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, - 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, - 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, - 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, - 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, - 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, - 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, - 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, - 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, - 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, - 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, - 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, - 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, - 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, - 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, - 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, - 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, - 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 
0x3c, 0x27, 0xfb, 0x2e, - 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, - 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, - 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, - 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, - 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, - 0xa8, 0x3d, 0x50, 0x34, 0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, - 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, - 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, - 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, - 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, - 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, - 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, - 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, - 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, - 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, - 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, - 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, - 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, - 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, - 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, - 0xb8, 0x08, 
0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, - 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, - 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, - 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, - 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, - 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, - 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, - 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 0xb0, 0x6e, 0x1a, 0x74, 0xe2, - 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, - 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, - 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, - 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, - 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, - 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, - 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, - 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, - 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, - 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, - 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, - 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, - 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 
0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, - 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, - 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, - 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, - 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, - 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, - 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, - 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, - 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, - 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, - 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, - 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, - 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, - 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, - 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, - 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, - 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, - 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, - 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, - 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, - 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, -} diff --git 
a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index 8697a50d..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,872 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. 
- // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. 
- TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? 
- optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. 
-message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. 
-// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; - - // This option does nothing. 
- optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default=SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. 
- optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. 
- // See the documentation for the "Options" section above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. 
- // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. 
That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default=false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default=false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. 
- optional bool deprecated = 3 [default=false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default=false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. 
Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. 
- // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. 
Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. 
- optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go deleted file mode 100644 index 0d6055d6..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - A plugin for the Google protocol buffer compiler to generate Go code. - Run it by building this program and putting it in your path with the name - protoc-gen-go - That word 'go' at the end becomes part of the option string set for the - protocol compiler, so once the protocol compiler (protoc) is installed - you can run - protoc --go_out=output_directory input_directory/file.proto - to generate Go bindings for the protocol defined by file.proto. - With that input, the output will be written to - output_directory/file.pb.go - - The generated code is documented in the package comment for - the library. 
- - See the README and documentation for protocol buffers to learn more: - https://developers.google.com/protocol-buffers/ - -*/ -package documentation diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go deleted file mode 100644 index c13a9f1e..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ /dev/null @@ -1,3086 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - The code generator for the plugin for the Google protocol buffer compiler. - It generates Go code from the protocol buffer description files read by the - main routine. -*/ -package generator - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "fmt" - "go/build" - "go/parser" - "go/printer" - "go/token" - "log" - "os" - "path" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// proto package is introduced; the generated code references -// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 2 - -// A Plugin provides functionality to add to the output during Go code generation, -// such as to produce RPC stubs. -type Plugin interface { - // Name identifies the plugin. - Name() string - // Init is called once after data structures are built but before - // code generation begins. - Init(g *Generator) - // Generate produces the code generated by the plugin for this file, - // except for the imports, by calling the generator's methods P, In, and Out. 
- Generate(file *FileDescriptor) - // GenerateImports produces the import declarations for this file. - // It is called after Generate. - GenerateImports(file *FileDescriptor) -} - -var plugins []Plugin - -// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. -// It is typically called during initialization. -func RegisterPlugin(p Plugin) { - plugins = append(plugins, p) -} - -// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". -type GoImportPath string - -func (p GoImportPath) String() string { return strconv.Quote(string(p)) } - -// A GoPackageName is the name of a Go package. e.g., "protobuf". -type GoPackageName string - -// Each type we import as a protocol buffer (other than FileDescriptorProto) needs -// a pointer to the FileDescriptorProto that represents it. These types achieve that -// wrapping by placing each Proto inside a struct with the pointer to its File. The -// structs have the same names as their contents, with "Proto" removed. -// FileDescriptor is used to store the things that it points to. - -// The file and package name method are common to messages and enums. -type common struct { - file *FileDescriptor // File this object comes from. -} - -// GoImportPath is the import path of the Go package containing the type. -func (c *common) GoImportPath() GoImportPath { - return c.file.importPath -} - -func (c *common) File() *FileDescriptor { return c.file } - -func fileIsProto3(file *descriptor.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } - -// Descriptor represents a protocol buffer message. -type Descriptor struct { - common - *descriptor.DescriptorProto - parent *Descriptor // The containing message, if any. - nested []*Descriptor // Inner messages, if any. - enums []*EnumDescriptor // Inner enums, if any. 
- ext []*ExtensionDescriptor // Extensions, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or another message. - path string // The SourceCodeInfo path as comma-separated integers. - group bool -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (d *Descriptor) TypeName() []string { - if d.typename != nil { - return d.typename - } - n := 0 - for parent := d; parent != nil; parent = parent.parent { - n++ - } - s := make([]string, n) - for parent := d; parent != nil; parent = parent.parent { - n-- - s[n] = parent.GetName() - } - d.typename = s - return s -} - -// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type EnumDescriptor struct { - common - *descriptor.EnumDescriptorProto - parent *Descriptor // The containing message, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or a message. - path string // The SourceCodeInfo path as comma-separated integers. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *EnumDescriptor) TypeName() (s []string) { - if e.typename != nil { - return e.typename - } - name := e.GetName() - if e.parent == nil { - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - e.typename = s - return s -} - -// Everything but the last element of the full type name, CamelCased. -// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... . -func (e *EnumDescriptor) prefix() string { - if e.parent == nil { - // If the enum is not part of a message, the prefix is just the type name. 
- return CamelCase(*e.Name) + "_" - } - typeName := e.TypeName() - return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" -} - -// The integer value of the named constant in this enumerated type. -func (e *EnumDescriptor) integerValueAsString(name string) string { - for _, c := range e.Value { - if c.GetName() == name { - return fmt.Sprint(c.GetNumber()) - } - } - log.Fatal("cannot find value for enum constant") - return "" -} - -// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type ExtensionDescriptor struct { - common - *descriptor.FieldDescriptorProto - parent *Descriptor // The containing message, if any. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *ExtensionDescriptor) TypeName() (s []string) { - name := e.GetName() - if e.parent == nil { - // top-level extension - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - return s -} - -// DescName returns the variable name used for the generated descriptor. -func (e *ExtensionDescriptor) DescName() string { - // The full type name. - typeName := e.TypeName() - // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. - for i, s := range typeName { - typeName[i] = CamelCase(s) - } - return "E_" + strings.Join(typeName, "_") -} - -// ImportedDescriptor describes a type that has been publicly imported from another file. -type ImportedDescriptor struct { - common - o Object -} - -func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } - -// FileDescriptor describes an protocol buffer descriptor file (.proto). -// It includes slices of all the messages and enums defined within it. -// Those slices are constructed by WrapTypes. 
-type FileDescriptor struct { - *descriptor.FileDescriptorProto - desc []*Descriptor // All the messages defined in this file. - enum []*EnumDescriptor // All the enums defined in this file. - ext []*ExtensionDescriptor // All the top-level extensions defined in this file. - imp []*ImportedDescriptor // All types defined in files publicly imported by this file. - - // Comments, stored as a map of path (comma-separated integers) to the comment. - comments map[string]*descriptor.SourceCodeInfo_Location - - // The full list of symbols that are exported, - // as a map from the exported object to its symbols. - // This is used for supporting public imports. - exported map[Object][]symbol - - fingerprint string // Fingerprint of this file's contents. - importPath GoImportPath // Import path of this file's package. - packageName GoPackageName // Name of this file's Go package. - - proto3 bool // whether to generate proto3 code for this file -} - -// VarName is the variable name we'll use in the generated code to refer -// to the compressed bytes of this descriptor. It is not exported, so -// it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { - name := strings.Map(badToUnderscore, baseName(d.GetName())) - return fmt.Sprintf("fileDescriptor_%s_%s", name, d.fingerprint) -} - -// goPackageOption interprets the file's go_package option. -// If there is no go_package, it returns ("", "", false). -// If there's a simple name, it returns ("", pkg, true). -// If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { - opt := d.GetOptions().GetGoPackage() - if opt == "" { - return "", "", false - } - // A semicolon-delimited suffix delimits the import path and package name. 
- sc := strings.Index(opt, ";") - if sc >= 0 { - return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true - } - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(opt, "/") - if slash >= 0 { - return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true - } - return "", cleanPackageName(opt), true -} - -// goFileName returns the output name for the generated Go file. -func (d *FileDescriptor) goFileName(pathType pathType) string { - name := *d.Name - if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += ".pb.go" - - if pathType == pathTypeSourceRelative { - return name - } - - // Does the file have a "go_package" option? - // If it does, it may override the filename. - if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { - // Replace the existing dirname with the declared import path. - _, name = path.Split(name) - name = path.Join(string(impPath), name) - return name - } - - return name -} - -func (d *FileDescriptor) addExport(obj Object, sym symbol) { - d.exported[obj] = append(d.exported[obj], sym) -} - -// symbol is an interface representing an exported Go symbol. -type symbol interface { - // GenerateAlias should generate an appropriate alias - // for the symbol from the named package. 
- GenerateAlias(g *Generator, pkg GoPackageName) -} - -type messageSymbol struct { - sym string - hasExtensions, isMessageSet bool - oneofTypes []string -} - -type getterSymbol struct { - name string - typ string - typeName string // canonical name in proto world; empty for proto.Message and similar - genType bool // whether typ contains a generated type (message/group/enum) -} - -func (ms *messageSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { - g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) - for _, name := range ms.oneofTypes { - g.P("type ", name, " = ", pkg, ".", name) - } -} - -type enumSymbol struct { - name string - proto3 bool // Whether this came from a proto3 file. -} - -func (es enumSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { - s := es.name - g.P("type ", s, " = ", pkg, ".", s) - g.P("var ", s, "_name = ", pkg, ".", s, "_name") - g.P("var ", s, "_value = ", pkg, ".", s, "_value") -} - -type constOrVarSymbol struct { - sym string - typ string // either "const" or "var" - cast string // if non-empty, a type cast is required (used for enums) -} - -func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { - v := string(pkg) + "." + cs.sym - if cs.cast != "" { - v = cs.cast + "(" + v + ")" - } - g.P(cs.typ, " ", cs.sym, " = ", v) -} - -// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. -type Object interface { - GoImportPath() GoImportPath - TypeName() []string - File() *FileDescriptor -} - -// Generator is the type whose methods generate the output, stored in the associated response structure. -type Generator struct { - *bytes.Buffer - - Request *plugin.CodeGeneratorRequest // The input. - Response *plugin.CodeGeneratorResponse // The output. - - Param map[string]string // Command-line parameters. - PackageImportPath string // Go import path of the package we're generating code for - ImportPrefix string // String to prefix to imported package file names. 
- ImportMap map[string]string // Mapping from .proto file name to import path - - Pkg map[string]string // The names under which we import support packages - - outputImportPath GoImportPath // Package we're generating code for. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. - file *FileDescriptor // The file we are compiling now. - packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. - usedPackages map[GoImportPath]bool // Packages used in current file. - usedPackageNames map[GoPackageName]bool // Package names used in the current file. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. - indent string - pathType pathType // How to generate output filenames. - writeOutput bool - annotateCode bool // whether to store annotations - annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store -} - -type pathType int - -const ( - pathTypeImport pathType = iota - pathTypeSourceRelative -) - -// New creates a new generator and allocates the request and response protobufs. -func New() *Generator { - g := new(Generator) - g.Buffer = new(bytes.Buffer) - g.Request = new(plugin.CodeGeneratorRequest) - g.Response = new(plugin.CodeGeneratorResponse) - return g -} - -// Error reports a problem, including an error, and exits the program. -func (g *Generator) Error(err error, msgs ...string) { - s := strings.Join(msgs, " ") + ":" + err.Error() - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// Fail reports a problem and exits the program. 
-func (g *Generator) Fail(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// CommandLineParameters breaks the comma-separated list of key=value pairs -// in the parameter (a member of the request protobuf) into a key/value map. -// It then sets file name mappings defined by those entries. -func (g *Generator) CommandLineParameters(parameter string) { - g.Param = make(map[string]string) - for _, p := range strings.Split(parameter, ",") { - if i := strings.Index(p, "="); i < 0 { - g.Param[p] = "" - } else { - g.Param[p[0:i]] = p[i+1:] - } - } - - g.ImportMap = make(map[string]string) - pluginList := "none" // Default list of plugin names to enable (empty means all). - for k, v := range g.Param { - switch k { - case "import_prefix": - g.ImportPrefix = v - case "import_path": - g.PackageImportPath = v - case "paths": - switch v { - case "import": - g.pathType = pathTypeImport - case "source_relative": - g.pathType = pathTypeSourceRelative - default: - g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) - } - case "plugins": - pluginList = v - case "annotate_code": - if v == "true" { - g.annotateCode = true - } - default: - if len(k) > 0 && k[0] == 'M' { - g.ImportMap[k[1:]] = v - } - } - } - if pluginList != "" { - // Amend the set of plugins. - enabled := make(map[string]bool) - for _, name := range strings.Split(pluginList, "+") { - enabled[name] = true - } - var nplugins []Plugin - for _, p := range plugins { - if enabled[p.Name()] { - nplugins = append(nplugins, p) - } - } - plugins = nplugins - } -} - -// DefaultPackageName returns the package name printed for the object. -// If its file is in a different package, it returns the package name we're using for this file, plus ".". -// Otherwise it returns the empty string. 
-func (g *Generator) DefaultPackageName(obj Object) string { - importPath := obj.GoImportPath() - if importPath == g.outputImportPath { - return "" - } - return string(g.GoPackageName(importPath)) + "." -} - -// GoPackageName returns the name used for a package. -func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { - if name, ok := g.packageNames[importPath]; ok { - return name - } - name := cleanPackageName(baseName(string(importPath))) - for i, orig := 1, name; g.usedPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - g.packageNames[importPath] = name - g.usedPackageNames[name] = true - return name -} - -var globalPackageNames = map[GoPackageName]bool{ - "fmt": true, - "math": true, - "proto": true, -} - -// Create and remember a guaranteed unique package name. Pkg is the candidate name. -// The FileDescriptor parameter is unused. -func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - name := cleanPackageName(pkg) - for i, orig := 1, name; globalPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - globalPackageNames[name] = true - return string(name) -} - -var isGoKeyword = map[string]bool{ - "break": true, - "case": true, - "chan": true, - "const": true, - "continue": true, - "default": true, - "else": true, - "defer": true, - "fallthrough": true, - "for": true, - "func": true, - "go": true, - "goto": true, - "if": true, - "import": true, - "interface": true, - "map": true, - "package": true, - "range": true, - "return": true, - "select": true, - "struct": true, - "switch": true, - "type": true, - "var": true, -} - -func cleanPackageName(name string) GoPackageName { - name = strings.Map(badToUnderscore, name) - // Identifier must not be keyword: insert _. - if isGoKeyword[name] { - name = "_" + name - } - // Identifier must not begin with digit: insert _. 
- if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { - name = "_" + name - } - return GoPackageName(name) -} - -// defaultGoPackage returns the package name to use, -// derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() GoPackageName { - p := g.PackageImportPath - if i := strings.LastIndex(p, "/"); i >= 0 { - p = p[i+1:] - } - return cleanPackageName(p) -} - -// SetPackageNames sets the package name for this run. -// The package name must agree across all files being generated. -// It also defines unique package names for all imported files. -func (g *Generator) SetPackageNames() { - g.outputImportPath = g.genFiles[0].importPath - - defaultPackageNames := make(map[GoImportPath]GoPackageName) - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - defaultPackageNames[f.importPath] = p - } - } - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - // Source file: option go_package = "quux/bar"; - f.packageName = p - } else if p, ok := defaultPackageNames[f.importPath]; ok { - // A go_package option in another file in the same package. - // - // This is a poor choice in general, since every source file should - // contain a go_package option. Supported mainly for historical - // compatibility. - f.packageName = p - } else if p := g.defaultGoPackage(); p != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets a package name for files which don't - // contain a go_package option. - f.packageName = p - } else if p := f.GetPackage(); p != "" { - // Source file: package quux.bar; - f.packageName = cleanPackageName(p) - } else { - // Source filename. - f.packageName = cleanPackageName(baseName(f.GetName())) - } - } - - // Check that all files have a consistent package name and import path. 
- for _, f := range g.genFiles[1:] { - if a, b := g.genFiles[0].importPath, f.importPath; a != b { - g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) - } - if a, b := g.genFiles[0].packageName, f.packageName; a != b { - g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) - } - } - - // Names of support packages. These never vary (if there are conflicts, - // we rename the conflicting package), so this could be removed someday. - g.Pkg = map[string]string{ - "fmt": "fmt", - "math": "math", - "proto": "proto", - } -} - -// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos -// and FileDescriptorProtos into file-referenced objects within the Generator. -// It also creates the list of files to generate and so should be called before GenerateAllFiles. -func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) - g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) - genFileNames := make(map[string]bool) - for _, n := range g.Request.FileToGenerate { - genFileNames[n] = true - } - for _, f := range g.Request.ProtoFile { - fd := &FileDescriptor{ - FileDescriptorProto: f, - exported: make(map[Object][]symbol), - proto3: fileIsProto3(f), - } - // The import path may be set in a number of ways. - if substitution, ok := g.ImportMap[f.GetName()]; ok { - // Command-line: M=foo.proto=quux/bar. - // - // Explicit mapping of source file to import path. - fd.importPath = GoImportPath(substitution) - } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets the import path for every file that - // we generate code for. - fd.importPath = GoImportPath(g.PackageImportPath) - } else if p, _, _ := fd.goPackageOption(); p != "" { - // Source file: option go_package = "quux/bar"; - // - // The go_package option sets the import path. Most users should use this. 
- fd.importPath = p - } else { - // Source filename. - // - // Last resort when nothing else is available. - fd.importPath = GoImportPath(path.Dir(f.GetName())) - } - // We must wrap the descriptors before we wrap the enums - fd.desc = wrapDescriptors(fd) - g.buildNestedDescriptors(fd.desc) - fd.enum = wrapEnumDescriptors(fd, fd.desc) - g.buildNestedEnums(fd.desc, fd.enum) - fd.ext = wrapExtensions(fd) - extractComments(fd) - g.allFiles = append(g.allFiles, fd) - g.allFilesByName[f.GetName()] = fd - } - for _, fd := range g.allFiles { - fd.imp = wrapImported(fd, g) - } - - g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) - for _, fileName := range g.Request.FileToGenerate { - fd := g.allFilesByName[fileName] - if fd == nil { - g.Fail("could not find file named", fileName) - } - fingerprint, err := fingerprintProto(fd.FileDescriptorProto) - if err != nil { - g.Error(err) - } - fd.fingerprint = fingerprint - g.genFiles = append(g.genFiles, fd) - } -} - -// fingerprintProto returns a fingerprint for a message. -// The fingerprint is intended to prevent conflicts between generated fileds, -// not to provide cryptographic security. -func fingerprintProto(m proto.Message) (string, error) { - b, err := proto.Marshal(m) - if err != nil { - return "", err - } - h := sha256.Sum256(b) - return hex.EncodeToString(h[:8]), nil -} - -// Scan the descriptors in this file. 
For each one, build the slice of nested descriptors -func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { - for _, desc := range descs { - if len(desc.NestedType) != 0 { - for _, nest := range descs { - if nest.parent == desc { - desc.nested = append(desc.nested, nest) - } - } - if len(desc.nested) != len(desc.NestedType) { - g.Fail("internal error: nesting failure for", desc.GetName()) - } - } - } -} - -func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { - for _, desc := range descs { - if len(desc.EnumType) != 0 { - for _, enum := range enums { - if enum.parent == desc { - desc.enums = append(desc.enums, enum) - } - } - if len(desc.enums) != len(desc.EnumType) { - g.Fail("internal error: enum nesting failure for", desc.GetName()) - } - } - } -} - -// Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { - d := &Descriptor{ - common: common{file}, - DescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - d.path = fmt.Sprintf("%d,%d", messagePath, index) - } else { - d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) - } - - // The only way to distinguish a group from a message is whether - // the containing message has a TYPE_GROUP field that matches. - if parent != nil { - parts := d.TypeName() - if file.Package != nil { - parts = append([]string{*file.Package}, parts...) - } - exp := "." 
+ strings.Join(parts, ".") - for _, field := range parent.Field { - if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { - d.group = true - break - } - } - } - - for _, field := range desc.Extension { - d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) - } - - return d -} - -// Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *FileDescriptor) []*Descriptor { - sl := make([]*Descriptor, 0, len(file.MessageType)+10) - for i, desc := range file.MessageType { - sl = wrapThisDescriptor(sl, desc, nil, file, i) - } - return sl -} - -// Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { - sl = append(sl, newDescriptor(desc, parent, file, index)) - me := sl[len(sl)-1] - for i, nested := range desc.NestedType { - sl = wrapThisDescriptor(sl, nested, me, file, i) - } - return sl -} - -// Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { - ed := &EnumDescriptor{ - common: common{file}, - EnumDescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - ed.path = fmt.Sprintf("%d,%d", enumPath, index) - } else { - ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) - } - return ed -} - -// Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { - sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) - // Top-level enums. - for i, enum := range file.EnumType { - sl = append(sl, newEnumDescriptor(enum, nil, file, i)) - } - // Enums within messages. Enums within embedded messages appear in the outer-most message. 
- for _, nested := range descs { - for i, enum := range nested.EnumType { - sl = append(sl, newEnumDescriptor(enum, nested, file, i)) - } - } - return sl -} - -// Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { - var sl []*ExtensionDescriptor - for _, field := range file.Extension { - sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) - } - return sl -} - -// Return a slice of all the types that are publicly imported into this file. -func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { - for _, index := range file.PublicDependency { - df := g.fileByName(file.Dependency[index]) - for _, d := range df.desc { - if d.GetOptions().GetMapEntry() { - continue - } - sl = append(sl, &ImportedDescriptor{common{file}, d}) - } - for _, e := range df.enum { - sl = append(sl, &ImportedDescriptor{common{file}, e}) - } - for _, ext := range df.ext { - sl = append(sl, &ImportedDescriptor{common{file}, ext}) - } - } - return -} - -func extractComments(file *FileDescriptor) { - file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) - for _, loc := range file.GetSourceCodeInfo().GetLocation() { - if loc.LeadingComments == nil { - continue - } - var p []string - for _, n := range loc.Path { - p = append(p, strconv.Itoa(int(n))) - } - file.comments[strings.Join(p, ",")] = loc - } -} - -// BuildTypeNameMap builds the map from fully qualified type names to objects. -// The key names for the map come from the input data, which puts a period at the beginning. -// It should be called after SetPackageNames and before GenerateAllFiles. -func (g *Generator) BuildTypeNameMap() { - g.typeNameToObject = make(map[string]Object) - for _, f := range g.allFiles { - // The names in this loop are defined by the proto world, not us, so the - // package name may be empty. 
If so, the dotted package name of X will - // be ".X"; otherwise it will be ".pkg.X". - dottedPkg := "." + f.GetPackage() - if dottedPkg != "." { - dottedPkg += "." - } - for _, enum := range f.enum { - name := dottedPkg + dottedSlice(enum.TypeName()) - g.typeNameToObject[name] = enum - } - for _, desc := range f.desc { - name := dottedPkg + dottedSlice(desc.TypeName()) - g.typeNameToObject[name] = desc - } - } -} - -// ObjectNamed, given a fully-qualified input type name as it appears in the input data, -// returns the descriptor for the message or enum with that name. -func (g *Generator) ObjectNamed(typeName string) Object { - o, ok := g.typeNameToObject[typeName] - if !ok { - g.Fail("can't find object with type", typeName) - } - - // If the file of this object isn't a direct dependency of the current file, - // or in the current file, then this object has been publicly imported into - // a dependency of the current file. - // We should return the ImportedDescriptor object for it instead. - direct := *o.File().Name == *g.file.Name - if !direct { - for _, dep := range g.file.Dependency { - if *g.fileByName(dep).Name == *o.File().Name { - direct = true - break - } - } - } - if !direct { - found := false - Loop: - for _, dep := range g.file.Dependency { - df := g.fileByName(*g.fileByName(dep).Name) - for _, td := range df.imp { - if td.o == o { - // Found it! - o = td - found = true - break Loop - } - } - } - if !found { - log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name) - } - } - - return o -} - -// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. -type AnnotatedAtoms struct { - source string - path string - atoms []interface{} -} - -// Annotate records the file name and proto AST path of a list of atoms -// so that a later call to P can emit a link from each atom to its origin. 
-func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { - return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} -} - -// printAtom prints the (atomic, non-annotation) argument to the generated output. -func (g *Generator) printAtom(v interface{}) { - switch v := v.(type) { - case string: - g.WriteString(v) - case *string: - g.WriteString(*v) - case bool: - fmt.Fprint(g, v) - case *bool: - fmt.Fprint(g, *v) - case int: - fmt.Fprint(g, v) - case *int32: - fmt.Fprint(g, *v) - case *int64: - fmt.Fprint(g, *v) - case float64: - fmt.Fprint(g, v) - case *float64: - fmt.Fprint(g, *v) - case GoPackageName: - g.WriteString(string(v)) - case GoImportPath: - g.WriteString(strconv.Quote(string(v))) - default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) - } -} - -// P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit -// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode -// is true). -func (g *Generator) P(str ...interface{}) { - if !g.writeOutput { - return - } - g.WriteString(g.indent) - for _, v := range str { - switch v := v.(type) { - case *AnnotatedAtoms: - begin := int32(g.Len()) - for _, v := range v.atoms { - g.printAtom(v) - } - if g.annotateCode { - end := int32(g.Len()) - var path []int32 - for _, token := range strings.Split(v.path, ",") { - val, err := strconv.ParseInt(token, 10, 32) - if err != nil { - g.Fail("could not parse proto AST path: ", err.Error()) - } - path = append(path, int32(val)) - } - g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ - Path: path, - SourceFile: &v.source, - Begin: &begin, - End: &end, - }) - } - default: - g.printAtom(v) - } - } - g.WriteByte('\n') -} - -// addInitf stores the given statement to be printed inside the file's init function. 
-// The statement is given as a format specifier and arguments. -func (g *Generator) addInitf(stmt string, a ...interface{}) { - g.init = append(g.init, fmt.Sprintf(stmt, a...)) -} - -// In Indents the output one tab stop. -func (g *Generator) In() { g.indent += "\t" } - -// Out unindents the output one tab stop. -func (g *Generator) Out() { - if len(g.indent) > 0 { - g.indent = g.indent[1:] - } -} - -// GenerateAllFiles generates the output for all the files we're outputting. -func (g *Generator) GenerateAllFiles() { - // Initialize the plugins - for _, p := range plugins { - p.Init(g) - } - // Generate the output. The generator runs for every file, even the files - // that we don't generate output for, so that we can collate the full list - // of exported symbols to support public imports. - genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) - for _, file := range g.genFiles { - genFileMap[file] = true - } - for _, file := range g.allFiles { - g.Reset() - g.annotations = nil - g.writeOutput = genFileMap[file] - g.generate(file) - if !g.writeOutput { - continue - } - fname := file.goFileName(g.pathType) - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(fname), - Content: proto.String(g.String()), - }) - if g.annotateCode { - // Store the generated code annotations in text, as the protoc plugin protocol requires that - // strings contain valid UTF-8. - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName(g.pathType) + ".meta"), - Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), - }) - } - } -} - -// Run all the plugins associated with the file. -func (g *Generator) runPlugins(file *FileDescriptor) { - for _, p := range plugins { - p.Generate(file) - } -} - -// Fill the response protocol buffer with the generated output for all the files we're -// supposed to generate. 
-func (g *Generator) generate(file *FileDescriptor) { - g.file = file - g.usedPackages = make(map[GoImportPath]bool) - g.packageNames = make(map[GoImportPath]GoPackageName) - g.usedPackageNames = make(map[GoPackageName]bool) - for name := range globalPackageNames { - g.usedPackageNames[name] = true - } - - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - - for _, td := range g.file.imp { - g.generateImported(td) - } - for _, enum := range g.file.enum { - g.generateEnum(enum) - } - for _, desc := range g.file.desc { - // Don't generate virtual messages for maps. - if desc.GetOptions().GetMapEntry() { - continue - } - g.generateMessage(desc) - } - for _, ext := range g.file.ext { - g.generateExtension(ext) - } - g.generateInitFunction() - - // Run the plugins before the imports so we know which imports are necessary. - g.runPlugins(file) - - g.generateFileDescriptor(file) - - // Generate header and imports last, though they appear first in the output. - rem := g.Buffer - remAnno := g.annotations - g.Buffer = new(bytes.Buffer) - g.annotations = nil - g.generateHeader() - g.generateImports() - if !g.writeOutput { - return - } - // Adjust the offsets for annotations displaced by the header and imports. - for _, anno := range remAnno { - *anno.Begin += int32(g.Len()) - *anno.End += int32(g.Len()) - g.annotations = append(g.annotations, anno) - } - g.Write(rem.Bytes()) - - // Reformat generated code and patch annotation locations. - fset := token.NewFileSet() - original := g.Bytes() - if g.annotateCode { - // make a copy independent of g; we'll need it after Reset. - original = append([]byte(nil), original...) 
- } - ast, err := parser.ParseFile(fset, "", original, parser.ParseComments) - if err != nil { - // Print out the bad code with line numbers. - // This should never happen in practice, but it can while changing generated code, - // so consider this a debugging aid. - var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(original)) - for line := 1; s.Scan(); line++ { - fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) - } - g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) - } - g.Reset() - err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) - if err != nil { - g.Fail("generated Go source code could not be reformatted:", err.Error()) - } - if g.annotateCode { - m, err := remap.Compute(original, g.Bytes()) - if err != nil { - g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) - } - for _, anno := range g.annotations { - new, ok := m.Find(int(*anno.Begin), int(*anno.End)) - if !ok { - g.Fail("span in formatted generated Go source code could not be mapped back to the original code") - } - *anno.Begin = int32(new.Pos) - *anno.End = int32(new.End) - } - } -} - -// Generate the header, including package definition -func (g *Generator) generateHeader() { - g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") - if g.file.GetOptions().GetDeprecated() { - g.P("// ", g.file.Name, " is a deprecated file.") - } else { - g.P("// source: ", g.file.Name) - } - g.P() - - importPath, _, _ := g.file.goPackageOption() - if importPath == "" { - g.P("package ", g.file.packageName) - } else { - g.P("package ", g.file.packageName, " // import ", GoImportPath(g.ImportPrefix)+importPath) - } - g.P() - - if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { - g.P("/*") - // not using g.PrintComments because this is a /* */ comment block. 
- text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") - for _, line := range strings.Split(text, "\n") { - line = strings.TrimPrefix(line, " ") - // ensure we don't escape from the block comment - line = strings.Replace(line, "*/", "* /", -1) - g.P(line) - } - g.P("*/") - g.P() - } -} - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// PrintComments prints any comments from the source .proto file. -// The path is a comma-separated list of integers. -// It returns an indication of whether any comments were printed. -// See descriptor.proto for its format. -func (g *Generator) PrintComments(path string) bool { - if !g.writeOutput { - return false - } - if c, ok := g.makeComments(path); ok { - g.P(c) - return true - } - return false -} - -// makeComments generates the comment string for the field, no "\n" at the end -func (g *Generator) makeComments(path string) (string, bool) { - loc, ok := g.file.comments[path] - if !ok { - return "", false - } - w := new(bytes.Buffer) - nl := "" - for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { - fmt.Fprintf(w, "%s// %s", nl, strings.TrimPrefix(line, " ")) - nl = "\n" - } - return w.String(), true -} - -func (g *Generator) fileByName(filename string) *FileDescriptor { - return g.allFilesByName[filename] -} - -// weak returns whether the ith import of the current file is a weak import. -func (g *Generator) weak(i int32) bool { - for _, j := range g.file.WeakDependency { - if j == i { - return true - } - } - return false -} - -// Generate the imports -func (g *Generator) generateImports() { - // We almost always need a proto import. Rather than computing when we - // do, which is tricky when there's a plugin, just import it and - // reference it later. The same argument applies to the fmt and math packages. 
- g.P("import "+g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") - g.P("import " + g.Pkg["fmt"] + ` "fmt"`) - g.P("import " + g.Pkg["math"] + ` "math"`) - var ( - imports = make(map[GoImportPath]bool) - strongImports = make(map[GoImportPath]bool) - importPaths []string - ) - for i, s := range g.file.Dependency { - fd := g.fileByName(s) - importPath := fd.importPath - // Do not import our own package. - if importPath == g.file.importPath { - continue - } - if !imports[importPath] { - importPaths = append(importPaths, string(importPath)) - } - imports[importPath] = true - if !g.weak(int32(i)) { - strongImports[importPath] = true - } - } - sort.Strings(importPaths) - for i := range importPaths { - importPath := GoImportPath(importPaths[i]) - packageName := g.GoPackageName(importPath) - fullPath := GoImportPath(g.ImportPrefix) + importPath - // Skip weak imports. - if !strongImports[importPath] { - g.P("// skipping weak import ", packageName, " ", fullPath) - continue - } - // We need to import all the dependencies, even if we don't reference them, - // because other code and tools depend on having the full transitive closure - // of protocol buffer types in the binary. - if _, ok := g.usedPackages[importPath]; !ok { - packageName = "_" - } - g.P("import ", packageName, " ", fullPath) - } - g.P() - // TODO: may need to worry about uniqueness across plugins - for _, p := range plugins { - p.GenerateImports(g.file) - g.P() - } - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ = ", g.Pkg["proto"], ".Marshal") - g.P("var _ = ", g.Pkg["fmt"], ".Errorf") - g.P("var _ = ", g.Pkg["math"], ".Inf") - g.P() -} - -func (g *Generator) generateImported(id *ImportedDescriptor) { - tn := id.TypeName() - sn := tn[len(tn)-1] - df := id.o.File() - filename := *df.Name - if df.importPath == g.file.importPath { - // Don't generate type aliases for files in the same Go package as this one. 
- g.P("// Ignoring public import of ", sn, " from ", filename) - g.P() - return - } - if !supportTypeAliases { - g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) - } - g.P("// ", sn, " from public import ", filename) - g.usedPackages[df.importPath] = true - - for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, g.GoPackageName(df.importPath)) - } - - g.P() -} - -// Generate the enum definitions for this EnumDescriptor. -func (g *Generator) generateEnum(enum *EnumDescriptor) { - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. - ccTypeName := CamelCaseSlice(typeName) - ccPrefix := enum.prefix() - - deprecatedEnum := "" - if enum.GetOptions().GetDeprecated() { - deprecatedEnum = deprecationComment - } - g.PrintComments(enum.path) - g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) - g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) - g.P("const (") - for i, e := range enum.Value { - etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) - g.PrintComments(etorPath) - - deprecatedValue := "" - if e.GetOptions().GetDeprecated() { - deprecatedValue = deprecationComment - } - - name := ccPrefix + *e.Name - g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) - g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) - } - g.P(")") - g.P("var ", ccTypeName, "_name = map[int32]string{") - generated := make(map[int32]bool) // avoid duplicate values - for _, e := range enum.Value { - duplicate := "" - if _, present := generated[*e.Number]; present { - duplicate = "// Duplicate value: " - } - g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") - generated[*e.Number] = true - } - g.P("}") - g.P("var ", ccTypeName, "_value = map[string]int32{") - for _, e := range enum.Value { - g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") - } - g.P("}") - - if !enum.proto3() { - g.P("func (x ", 
ccTypeName, ") Enum() *", ccTypeName, " {") - g.P("p := new(", ccTypeName, ")") - g.P("*p = x") - g.P("return p") - g.P("}") - } - - g.P("func (x ", ccTypeName, ") String() string {") - g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") - g.P("}") - - if !enum.proto3() { - g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") - g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) - g.P("if err != nil {") - g.P("return err") - g.P("}") - g.P("*x = ", ccTypeName, "(value)") - g.P("return nil") - g.P("}") - } - - var indexes []string - for m := enum.parent; m != nil; m = m.parent { - // XXX: skip groups? - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { - g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) - } - - g.P() -} - -// The tag is a string like "varint,2,opt,name=fieldname,def=7" that -// identifies details of the field for the protocol buffer marshaling and unmarshaling -// code. The fields are: -// wire encoding -// protocol tag number -// opt,req,rep for optional, required, or repeated -// packed whether the encoding is "packed" (optional; repeated primitives only) -// name= the original declared name -// enum= the name of the enum type if it is an enum-typed field. -// proto3 if this field is in a proto3 message -// def= string representation of the default value, if any. -// The default value must be in a representation that can be used at run-time -// to generate the default value. Thus bools become 0 and 1, for instance. 
-func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { - optrepreq := "" - switch { - case isOptional(field): - optrepreq = "opt" - case isRequired(field): - optrepreq = "req" - case isRepeated(field): - optrepreq = "rep" - } - var defaultValue string - if dv := field.DefaultValue; dv != nil { // set means an explicit default - defaultValue = *dv - // Some types need tweaking. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - if defaultValue == "true" { - defaultValue = "1" - } else { - defaultValue = "0" - } - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - // Nothing to do. Quoting is done for the whole tag. - case descriptor.FieldDescriptorProto_TYPE_ENUM: - // For enums we need to provide the integer constant. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - // It is an enum that was publicly imported. - // We need the underlying type. - obj = id.o - } - enum, ok := obj.(*EnumDescriptor) - if !ok { - log.Printf("obj is a %T", obj) - if id, ok := obj.(*ImportedDescriptor); ok { - log.Printf("id.o is a %T", id.o) - } - g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) - } - defaultValue = enum.integerValueAsString(defaultValue) - } - defaultValue = ",def=" + defaultValue - } - enum := "" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.GoPackageName(), because we want to use the - // original (proto-world) package name. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - obj = id.o - } - enum = ",enum=" - if pkg := obj.File().GetPackage(); pkg != "" { - enum += pkg + "." 
- } - enum += CamelCaseSlice(obj.TypeName()) - } - packed := "" - if (field.Options != nil && field.Options.GetPacked()) || - // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: - // "In proto3, repeated fields of scalar numeric types use packed encoding by default." - (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && - isRepeated(field) && isScalar(field)) { - packed = ",packed" - } - fieldName := field.GetName() - name := fieldName - if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - // We must use the type name for groups instead of - // the field name to preserve capitalization. - // type_name in FieldDescriptorProto is fully-qualified, - // but we only want the local part. - name = *field.TypeName - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - } - if json := field.GetJsonName(); json != "" && json != name { - // TODO: escaping might be needed, in which case - // perhaps this should be in its own "json" tag. - name += ",json=" + json - } - name = ",name=" + name - if message.proto3() { - name += ",proto3" - } - oneof := "" - if field.OneofIndex != nil { - oneof = ",oneof" - } - return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", - wiretype, - field.GetNumber(), - optrepreq, - packed, - name, - enum, - oneof, - defaultValue)) -} - -func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { - switch typ { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - return false - case descriptor.FieldDescriptorProto_TYPE_BYTES: - return false - } - return true -} - -// TypeName is the printed name appropriate for an item. If the object is in the current file, -// TypeName drops the package name and underscores the rest. -// Otherwise the object is from another package; and the result is the underscored -// package name followed by the item name. -// The result always has an initial capital. 
-func (g *Generator) TypeName(obj Object) string { - return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) -} - -// GoType returns a string representing the type name, and the wire type -func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { - // TODO: Options. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - typ, wire = "float64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - typ, wire = "float32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_INT64: - typ, wire = "int64", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - typ, wire = "uint64", "varint" - case descriptor.FieldDescriptorProto_TYPE_INT32: - typ, wire = "int32", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - typ, wire = "uint32", "varint" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - typ, wire = "uint64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - typ, wire = "uint32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - typ, wire = "bool", "varint" - case descriptor.FieldDescriptorProto_TYPE_STRING: - typ, wire = "string", "bytes" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "group" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "bytes" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - typ, wire = "[]byte", "bytes" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = g.TypeName(desc), "varint" - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - typ, wire = "int32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - typ, wire = "int64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - typ, wire = "int32", "zigzag32" - case 
descriptor.FieldDescriptorProto_TYPE_SINT64: - typ, wire = "int64", "zigzag64" - default: - g.Fail("unknown type for", field.GetName()) - } - if isRepeated(field) { - typ = "[]" + typ - } else if message != nil && message.proto3() { - return - } else if field.OneofIndex != nil && message != nil { - return - } else if needsStar(*field.Type) { - typ = "*" + typ - } - return -} - -func (g *Generator) RecordTypeUse(t string) { - if _, ok := g.typeNameToObject[t]; ok { - // Call ObjectNamed to get the true object to record the use. - obj := g.ObjectNamed(t) - g.usedPackages[obj.GoImportPath()] = true - } -} - -// Method names that may be generated. Fields with these names get an -// underscore appended. Any change to this set is a potential incompatible -// API change because it changes generated field names. -var methodNames = [...]string{ - "Reset", - "String", - "ProtoMessage", - "Marshal", - "Unmarshal", - "ExtensionRangeArray", - "ExtensionMap", - "Descriptor", -} - -// Names of messages in the `google.protobuf` package for which -// we will generate XXX_WellKnownType methods. -var wellKnownTypes = map[string]bool{ - "Any": true, - "Duration": true, - "Empty": true, - "Struct": true, - "Timestamp": true, - - "Value": true, - "ListValue": true, - "DoubleValue": true, - "FloatValue": true, - "Int64Value": true, - "UInt64Value": true, - "Int32Value": true, - "UInt32Value": true, - "BoolValue": true, - "StringValue": true, - "BytesValue": true, -} - -// getterDefault finds the default value for the field to return from a getter, -// regardless of if it's a built in default or explicit from the source. Returns e.g. 
"nil", `""`, "Default_MessageType_FieldName" -func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string { - if isRepeated(field) { - return "nil" - } - if def := field.GetDefaultValue(); def != "" { - defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) - if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { - return defaultConstant - } - return "append([]byte(nil), " + defaultConstant + "...)" - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - return "false" - case descriptor.FieldDescriptorProto_TYPE_STRING: - return `""` - case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES: - return "nil" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - obj := g.ObjectNamed(field.GetTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate getter for %s", field.GetName()) - return "nil" - } - if len(enum.Value) == 0 { - return "0 // empty enum" - } - first := enum.Value[0].GetName() - return g.DefaultPackageName(obj) + enum.prefix() + first - default: - return "0" - } -} - -// defaultConstantName builds the name of the default constant from the message -// type name and the untouched field name, e.g. "Default_MessageType_FieldName" -func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { - return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) -} - -// The different types of fields in a message and how to actually print them -// Most of the logic for generateMessage is in the methods of these types. 
-// -// Note that the content of the field is irrelevant, a simpleField can contain -// anything from a scalar to a group (which is just a message). -// -// Extension fields (and message sets) are however handled separately. -// -// simpleField - a field that is neiter weak nor oneof, possibly repeated -// oneofField - field containing list of subfields: -// - oneofSubField - a field within the oneof - -// msgCtx contais the context for the generator functions. -type msgCtx struct { - goName string // Go struct name of the message, e.g. MessageName - message *Descriptor // The descriptor for the message -} - -// fieldCommon contains data common to all types of fields. -type fieldCommon struct { - goName string // Go name of field, e.g. "FieldName" or "Descriptor_" - protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" - getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" - goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" - tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"` - fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" -} - -// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". -func (f *fieldCommon) getProtoName() string { - return f.protoName -} - -// getGoType returns the go type of the field as a string, e.g. "*int32". -func (f *fieldCommon) getGoType() string { - return f.goType -} - -// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. -type simpleField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." 
- getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - comment string // The full comment for the field, e.g. "// Useful information" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *simpleField) decl(g *Generator, mc *msgCtx) { - g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) -} - -// getter prints the getter for the field. -func (f *simpleField) getter(g *Generator, mc *msgCtx) { - star := "" - tname := f.goType - if needsStar(f.protoType) && tname[0] == '*' { - tname = tname[1:] - star = "*" - } - if f.deprecated != "" { - g.P(f.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {") - if f.getterDef == "nil" { // Simpler getter - g.P("if m != nil {") - g.P("return m." + f.goName) - g.P("}") - g.P("return nil") - g.P("}") - g.P() - return - } - if mc.message.proto3() { - g.P("if m != nil {") - } else { - g.P("if m != nil && m." + f.goName + " != nil {") - } - g.P("return " + star + "m." + f.goName) - g.P("}") - g.P("return ", f.getterDef) - g.P("}") - g.P() -} - -// setter prints the setter method of the field. -func (f *simpleField) setter(g *Generator, mc *msgCtx) { - // No setter for regular fields yet -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *simpleField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *simpleField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. 
-func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofSubFields are kept slize held by each oneofField. They do not appear in the top level slize of fields for the message. -type oneofSubField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName" - fieldNumber int // Actual field number, as defined in proto, e.g. 12 - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" -} - -// wireTypeName returns a textual wire type, needed for oneof sub fields in generated code. -func (f *oneofSubField) wireTypeName() string { - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_DOUBLE: - return "WireFixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_FLOAT: - return "WireFixed32" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return "WireStartGroup" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE, - descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - return "WireBytes" - default: // All others are Varints - return "WireVarint" - } -} - -// typedNil prints a nil casted to the pointer to this field. -// - for XXX_OneofFuncs -func (f *oneofSubField) typedNil(g *Generator) { - g.P("(*", f.oneofTypeName, ")(nil),") -} - -// marshalCase prints the case matching this oneof subfield in the marshalling code. 
-func (f *oneofSubField) marshalCase(g *Generator) { - g.P("case *", f.oneofTypeName, ":") - wire := f.wireTypeName() - var pre, post string - val := "x." + f.goName // overridden for TYPE_BOOL - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits(" - post = "))" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits(" - post = ")))" - case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64: - pre, post = "b.EncodeVarint(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM: - pre, post = "b.EncodeVarint(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: - pre, post = "b.EncodeFixed64(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - pre, post = "b.EncodeFixed32(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - g.P("t := uint64(0)") - g.P("if ", val, " { t = 1 }") - val = "t" - pre, post = "b.EncodeVarint(", ")" - case descriptor.FieldDescriptorProto_TYPE_STRING: - pre, post = "b.EncodeStringBytes(", ")" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - pre, post = "b.Marshal(", ")" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - pre, post = "b.EncodeMessage(", ")" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - pre, post = "b.EncodeRawBytes(", ")" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - pre, post = "b.EncodeZigzag32(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - pre, post = "b.EncodeZigzag64(uint64(", "))" - default: - g.Fail("unhandled oneof field type ", f.protoType.String()) - } - g.P("b.EncodeVarint(", f.fieldNumber, "<<3|", g.Pkg["proto"], ".", wire, ")") - if t := f.protoType; t != 
descriptor.FieldDescriptorProto_TYPE_GROUP && t != descriptor.FieldDescriptorProto_TYPE_MESSAGE { - g.P(pre, val, post) - } else { - g.P("if err := ", pre, val, post, "; err != nil {") - g.P("return err") - g.P("}") - } - if f.protoType == descriptor.FieldDescriptorProto_TYPE_GROUP { - g.P("b.EncodeVarint(", f.fieldNumber, "<<3|", g.Pkg["proto"], ".WireEndGroup)") - } -} - -// unmarshalCase prints the case matching this oneof subfield in the unmarshalling code. -func (f *oneofSubField) unmarshalCase(g *Generator, origOneofName string, oneofName string) { - g.P("case ", f.fieldNumber, ": // ", origOneofName, ".", f.getProtoName()) - g.P("if wire != ", g.Pkg["proto"], ".", f.wireTypeName(), " {") - g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType") - g.P("}") - lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP - var dec, cast, cast2 string - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits" - case descriptor.FieldDescriptorProto_TYPE_INT64: - dec, cast = "b.DecodeVarint()", "int64" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - dec = "b.DecodeVarint()" - case descriptor.FieldDescriptorProto_TYPE_INT32: - dec, cast = "b.DecodeVarint()", "int32" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - dec = "b.DecodeFixed64()" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - dec, cast = "b.DecodeFixed32()", "uint32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - dec = "b.DecodeVarint()" - // handled specially below - case descriptor.FieldDescriptorProto_TYPE_STRING: - dec = "b.DecodeStringBytes()" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - g.P("msg := new(", f.goType[1:], ")") // drop star - lhs = "err" - dec = "b.DecodeGroup(msg)" - // handled specially below - case 
descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.P("msg := new(", f.goType[1:], ")") // drop star - lhs = "err" - dec = "b.DecodeMessage(msg)" - // handled specially below - case descriptor.FieldDescriptorProto_TYPE_BYTES: - dec = "b.DecodeRawBytes(true)" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - dec, cast = "b.DecodeVarint()", "uint32" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - dec, cast = "b.DecodeVarint()", f.goType - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - dec, cast = "b.DecodeFixed32()", "int32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - dec, cast = "b.DecodeFixed64()", "int64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - dec, cast = "b.DecodeZigzag32()", "int32" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - dec, cast = "b.DecodeZigzag64()", "int64" - default: - g.Fail("unhandled oneof field type ", f.protoType.String()) - } - g.P(lhs, " := ", dec) - val := "x" - if cast != "" { - val = cast + "(" + val + ")" - } - if cast2 != "" { - val = cast2 + "(" + val + ")" - } - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - val += " != 0" - case descriptor.FieldDescriptorProto_TYPE_GROUP, - descriptor.FieldDescriptorProto_TYPE_MESSAGE: - val = "msg" - } - g.P("m.", oneofName, " = &", f.oneofTypeName, "{", val, "}") - g.P("return true, err") -} - -// sizerCase prints the case matching this oneof subfield in the sizer code. -func (f *oneofSubField) sizerCase(g *Generator) { - g.P("case *", f.oneofTypeName, ":") - val := "x." 
+ f.goName - var varint, fixed string - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - fixed = "8" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - fixed = "4" - case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM: - varint = val - case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: - fixed = "8" - case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - fixed = "4" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - fixed = "1" - case descriptor.FieldDescriptorProto_TYPE_STRING: - fixed = "len(" + val + ")" - varint = fixed - case descriptor.FieldDescriptorProto_TYPE_GROUP: - fixed = g.Pkg["proto"] + ".Size(" + val + ")" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") - fixed = "s" - varint = fixed - case descriptor.FieldDescriptorProto_TYPE_BYTES: - fixed = "len(" + val + ")" - varint = fixed - case descriptor.FieldDescriptorProto_TYPE_SINT32: - varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" - default: - g.Fail("unhandled oneof field type ", f.protoType.String()) - } - // Tag and wire varint is known statically, - // so don't generate code for that part of the size computation. 
- tagAndWireSize := proto.SizeVarint(uint64(f.fieldNumber << 3)) // wire doesn't affect varint size - g.P("n += ", tagAndWireSize, " // tag and wire") - if varint != "" { - g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") - } - if fixed != "" { - g.P("n += ", fixed) - } - if f.protoType == descriptor.FieldDescriptorProto_TYPE_GROUP { - g.P("n += ", tagAndWireSize, " // tag and wire") - } -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *oneofSubField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *oneofSubField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofField represents the oneof on top level. -// The alternative fields within the oneof are represented by oneofSubField. -type oneofField struct { - fieldCommon - subFields []*oneofSubField // All the possible oneof fields - comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *oneofField) decl(g *Generator, mc *msgCtx) { - comment := f.comment - for _, sf := range f.subFields { - comment += "//\t*" + sf.oneofTypeName + "\n" - } - g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`") -} - -// getter for a oneof field will print additional discriminators and interfaces for the oneof, -// also it prints all the getters for the sub fields. 
-func (f *oneofField) getter(g *Generator, mc *msgCtx) { - // The discriminator type - g.P("type ", f.goType, " interface {") - g.P(f.goType, "()") - g.P("}") - g.P() - // The subField types, fulfilling the discriminator type contract - for _, sf := range f.subFields { - g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {") - g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`") - g.P("}") - g.P() - } - for _, sf := range f.subFields { - g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}") - g.P() - } - // Getter for the oneof field - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {") - g.P("if m != nil { return m.", f.goName, " }") - g.P("return nil") - g.P("}") - g.P() - // Getters for each oneof - for _, sf := range f.subFields { - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {") - g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {") - g.P("return x.", sf.goName) - g.P("}") - g.P("return ", sf.getterDef) - g.P("}") - g.P() - } -} - -// setter prints the setter method of the field. -func (f *oneofField) setter(g *Generator, mc *msgCtx) { - // No setters for oneof yet -} - -// topLevelField interface implemented by all types of fields on the top level (not oneofSubField). -type topLevelField interface { - decl(g *Generator, mc *msgCtx) // print declaration within the struct - getter(g *Generator, mc *msgCtx) // print getter - setter(g *Generator, mc *msgCtx) // print setter if applicable -} - -// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField). -type defField interface { - getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5" - getProtoName() string // proto name of a field, e.g. 
"field_name" or "descriptor" - getGoType() string // go type of the field as a string, e.g. "*int32" - getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration" - getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 -} - -// generateDefaultConstants adds constants for default values if needed, which is only if the default value is. -// explicit in the proto. -func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) { - // Collect fields that can have defaults - dFields := []defField{} - for _, pf := range topLevelFields { - if f, ok := pf.(*oneofField); ok { - for _, osf := range f.subFields { - dFields = append(dFields, osf) - } - continue - } - dFields = append(dFields, pf.(defField)) - } - for _, df := range dFields { - def := df.getProtoDef() - if def == "" { - continue - } - fieldname := g.defaultConstantName(mc.goName, df.getProtoName()) - typename := df.getGoType() - if typename[0] == '*' { - typename = typename[1:] - } - kind := "const " - switch { - case typename == "bool": - case typename == "string": - def = strconv.Quote(def) - case typename == "[]byte": - def = "[]byte(" + strconv.Quote(unescape(def)) + ")" - kind = "var " - case def == "inf", def == "-inf", def == "nan": - // These names are known to, and defined by, the protocol language. - switch def { - case "inf": - def = "math.Inf(1)" - case "-inf": - def = "math.Inf(-1)" - case "nan": - def = "math.NaN()" - } - if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { - def = "float32(" + def + ")" - } - kind = "var " - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: - // Must be an enum. Need to construct the prefixed name. - obj := g.ObjectNamed(df.getProtoTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. 
- enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate constant for %s", fieldname) - continue - } - def = g.DefaultPackageName(obj) + enum.prefix() + def - } - g.P(kind, fieldname, " ", typename, " = ", def) - g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) - } - g.P() -} - -// generateInternalStructFields just adds the XXX_ fields to the message struct. -func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { - g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals - if len(mc.message.ExtensionRange) > 0 { - messageset := "" - if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - messageset = "protobuf_messageset:\"1\" " - } - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") - } - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") - g.P("XXX_sizecache\tint32 `json:\"-\"`") - -} - -// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. 
-func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { - ofields := []*oneofField{} - for _, f := range topLevelFields { - if o, ok := f.(*oneofField); ok { - ofields = append(ofields, o) - } - } - if len(ofields) == 0 { - return - } - enc := "_" + mc.goName + "_OneofMarshaler" - dec := "_" + mc.goName + "_OneofUnmarshaler" - size := "_" + mc.goName + "_OneofSizer" - encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" - decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" - sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)" - - // OneofFuncs - g.P("// XXX_OneofFuncs is for the internal use of the proto package.") - g.P("func (*", mc.goName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") - g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.typedNil(g) - } - } - g.P("}") - g.P("}") - g.P() - - // marshaler - g.P("func ", enc, encSig, " {") - g.P("m := msg.(*", mc.goName, ")") - for _, of := range ofields { - g.P("// ", of.getProtoName()) - g.P("switch x := m.", of.goName, ".(type) {") - for _, sf := range of.subFields { - // also fills in field.wire - sf.marshalCase(g) - } - g.P("case nil:") - g.P("default:") - g.P(" return ", g.Pkg["fmt"], `.Errorf("`, mc.goName, ".", of.goName, ` has unexpected type %T", x)`) - g.P("}") - } - g.P("return nil") - g.P("}") - g.P() - - // unmarshaler - g.P("func ", dec, decSig, " {") - g.P("m := msg.(*", mc.goName, ")") - g.P("switch tag {") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.unmarshalCase(g, of.getProtoName(), of.goName) - } - } - g.P("default:") - g.P("return false, nil") - g.P("}") - g.P("}") - g.P() - - // sizer - g.P("func ", size, sizeSig, " {") - g.P("m := msg.(*", mc.goName, ")") - for _, of := range ofields { - g.P("// ", 
of.getProtoName()) - g.P("switch x := m.", of.goName, ".(type) {") - for _, sf := range of.subFields { - // also fills in field.wire - sf.sizerCase(g) - } - g.P("case nil:") - g.P("default:") - g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))") - g.P("}") - } - g.P("return n") - g.P("}") - g.P() -} - -// generateMessageStruct adds the actual struct with it's members (but not methods) to the output. -func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) { - comments := g.PrintComments(mc.message.path) - - // Guarantee deprecation comments appear after user-provided comments. - if mc.message.GetOptions().GetDeprecated() { - if comments { - // Convention: Separate deprecation comments from original - // comments with an empty line. - g.P("//") - } - g.P(deprecationComment) - } - - g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {") - for _, pf := range topLevelFields { - pf.decl(g, mc) - } - g.generateInternalStructFields(mc, topLevelFields) - g.P("}") -} - -// generateGetters adds getters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.getter(g, mc) - } -} - -// generateSetters add setters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.setter(g, mc) - } -} - -// generateCommonMethods adds methods to the message that are not on a per field basis. -func (g *Generator) generateCommonMethods(mc *msgCtx) { - // Reset, String and ProtoMessage methods. 
- g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") - g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") - g.P("func (*", mc.goName, ") ProtoMessage() {}") - var indexes []string - for m := mc.message; m != nil; m = m.parent { - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - // TODO: Revisit the decision to use a XXX_WellKnownType method - // if we change proto.MessageName to work with multiple equivalents. - if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { - g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) - } - - // Extension support methods - if len(mc.message.ExtensionRange) > 0 { - // message_set_wire_format only makes sense when extensions are defined. 
- if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - g.P() - g.P("func (m *", mc.goName, ") MarshalJSON() ([]byte, error) {") - g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") - g.P("}") - g.P("func (m *", mc.goName, ") UnmarshalJSON(buf []byte) error {") - g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") - g.P("}") - } - - g.P() - g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") - for _, r := range mc.message.ExtensionRange { - end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{Start: ", r.Start, ", End: ", end, "},") - } - g.P("}") - g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") - g.P("return extRange_", mc.goName) - g.P("}") - } - - // TODO: It does not scale to keep adding another method for every - // operation on protos that we want to switch over to using the - // table-driven approach. Instead, we should only add a single method - // that allows getting access to the *InternalMessageInfo struct and then - // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. - - // Wrapper for table-driven marshaling and unmarshaling. 
- g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") - g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") - g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") - g.P("}") - - g.P("func (dst *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") - g.P("xxx_messageInfo_", mc.goName, ".Merge(dst, src)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message - g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") - g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") - g.P("}") - - g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") - g.P() -} - -// Generate the type, methods and default constant definitions for this Descriptor. -func (g *Generator) generateMessage(message *Descriptor) { - topLevelFields := []topLevelField{} - oFields := make(map[int32]*oneofField) - // The full type name - typeName := message.TypeName() - // The full type name, CamelCased. - goTypeName := CamelCaseSlice(typeName) - - usedNames := make(map[string]bool) - for _, n := range methodNames { - usedNames[n] = true - } - - // allocNames finds a conflict-free variation of the given strings, - // consistently mutating their suffixes. - // It returns the same number of strings. 
- allocNames := func(ns ...string) []string { - Loop: - for { - for _, n := range ns { - if usedNames[n] { - for i := range ns { - ns[i] += "_" - } - continue Loop - } - } - for _, n := range ns { - usedNames[n] = true - } - return ns - } - } - - mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later - - // Build a structure more suitable for generating the text in one pass - for i, field := range message.Field { - // Allocate the getter and the field at the same time so name - // collisions create field/method consistent names. - // TODO: This allocation occurs based on the order of the fields - // in the proto file, meaning that a change in the field - // ordering can change generated Method/Field names. - base := CamelCase(*field.Name) - ns := allocNames(base, "Get"+base) - fieldName, fieldGetterName := ns[0], ns[1] - typename, wiretype := g.GoType(message, field) - jsonName := *field.Name - tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") - - oneof := field.OneofIndex != nil - if oneof && oFields[*field.OneofIndex] == nil { - odp := message.OneofDecl[int(*field.OneofIndex)] - base := CamelCase(odp.GetName()) - names := allocNames(base, "Get"+base) - fname, gname := names[0], names[1] - - // This is the first field of a oneof we haven't seen before. - // Generate the union field. - oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) - c, ok := g.makeComments(oneofFullPath) - if ok { - c += "\n//\n" - } - c += "// Types that are valid to be assigned to " + fname + ":\n" - // Generate the rest of this comment later, - // when we've computed any disambiguation. 
- - dname := "is" + goTypeName + "_" + fname - tag := `protobuf_oneof:"` + odp.GetName() + `"` - of := oneofField{ - fieldCommon: fieldCommon{ - goName: fname, - getterName: gname, - goType: dname, - tags: tag, - protoName: odp.GetName(), - fullPath: oneofFullPath, - }, - comment: c, - } - topLevelFields = append(topLevelFields, &of) - oFields[*field.OneofIndex] = &of - } - - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - desc := g.ObjectNamed(field.GetTypeName()) - if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { - // Figure out the Go types and tags for the key and value types. - keyField, valField := d.Field[0], d.Field[1] - keyType, keyWire := g.GoType(d, keyField) - valType, valWire := g.GoType(d, valField) - keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) - - // We don't use stars, except for message-typed values. - // Message and enum types are the only two possibly foreign types used in maps, - // so record their use. They are not permitted as map keys. - keyType = strings.TrimPrefix(keyType, "*") - switch *valField.Type { - case descriptor.FieldDescriptorProto_TYPE_ENUM: - valType = strings.TrimPrefix(valType, "*") - g.RecordTypeUse(valField.GetTypeName()) - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.RecordTypeUse(valField.GetTypeName()) - default: - valType = strings.TrimPrefix(valType, "*") - } - - typename = fmt.Sprintf("map[%s]%s", keyType, valType) - mapFieldTypes[field] = typename // record for the getter generation - - tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) - } - } - - dvalue := g.getterDefault(field, goTypeName) - if oneof { - tname := goTypeName + "_" + fieldName - // It is possible for this to collide with a message or enum - // nested in this message. Check for collisions. 
- for { - ok := true - for _, desc := range message.nested { - if CamelCaseSlice(desc.TypeName()) == tname { - ok = false - break - } - } - for _, enum := range message.enums { - if CamelCaseSlice(enum.TypeName()) == tname { - ok = false - break - } - } - if !ok { - tname += "_" - continue - } - break - } - - oneofField := oFields[*field.OneofIndex] - tag := "protobuf:" + g.goTag(message, field, wiretype) - sf := oneofSubField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), - }, - protoTypeName: field.GetTypeName(), - fieldNumber: int(*field.Number), - protoType: *field.Type, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - oneofTypeName: tname, - } - oneofField.subFields = append(oneofField.subFields, &sf) - g.RecordTypeUse(field.GetTypeName()) - continue - } - - fieldDeprecated := "" - if field.GetOptions().GetDeprecated() { - fieldDeprecated = deprecationComment - } - - fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - c, ok := g.makeComments(fieldFullPath) - if ok { - c += "\n" - } - rf := simpleField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fieldFullPath, - }, - protoTypeName: field.GetTypeName(), - protoType: *field.Type, - deprecated: fieldDeprecated, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - comment: c, - } - var pf topLevelField = &rf - - topLevelFields = append(topLevelFields, pf) - g.RecordTypeUse(field.GetTypeName()) - } - - mc := &msgCtx{ - goName: goTypeName, - message: message, - } - - g.generateMessageStruct(mc, topLevelFields) - g.P() - g.generateCommonMethods(mc) - g.P() - g.generateDefaultConstants(mc, topLevelFields) - g.P() - g.generateGetters(mc, topLevelFields) - g.P() - g.generateSetters(mc, topLevelFields) 
- g.P() - g.generateOneofFuncs(mc, topLevelFields) - g.P() - - if !message.group { - - var oneofTypes []string - for _, f := range topLevelFields { - if of, ok := f.(*oneofField); ok { - for _, osf := range of.subFields { - oneofTypes = append(oneofTypes, osf.oneofTypeName) - } - } - } - - opts := message.Options - ms := &messageSymbol{ - sym: goTypeName, - hasExtensions: len(message.ExtensionRange) > 0, - isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), - oneofTypes: oneofTypes, - } - g.file.addExport(message, ms) - } - - for _, ext := range message.ext { - g.generateExtension(ext) - } - - fullName := strings.Join(message.TypeName(), ".") - if g.file.Package != nil { - fullName = *g.file.Package + "." + fullName - } - - g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) - // Register types for native map types. - for _, k := range mapFieldKeys(mapFieldTypes) { - fullName := strings.TrimPrefix(*k.TypeName, ".") - g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) - } - -} - -type byTypeName []*descriptor.FieldDescriptorProto - -func (a byTypeName) Len() int { return len(a) } -func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } - -// mapFieldKeys returns the keys of m in a consistent order. -func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { - keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Sort(byTypeName(keys)) - return keys -} - -var escapeChars = [256]byte{ - 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', -} - -// unescape reverses the "C" escaping that protoc does for default values of bytes fields. -// It is best effort in that it effectively ignores malformed input. 
Seemingly invalid escape -// sequences are conveyed, unmodified, into the decoded result. -func unescape(s string) string { - // NB: Sadly, we can't use strconv.Unquote because protoc will escape both - // single and double quotes, but strconv.Unquote only allows one or the - // other (based on actual surrounding quotes of its input argument). - - var out []byte - for len(s) > 0 { - // regular character, or too short to be valid escape - if s[0] != '\\' || len(s) < 2 { - out = append(out, s[0]) - s = s[1:] - } else if c := escapeChars[s[1]]; c != 0 { - // escape sequence - out = append(out, c) - s = s[2:] - } else if s[1] == 'x' || s[1] == 'X' { - // hex escape, e.g. "\x80 - if len(s) < 4 { - // too short to be valid - out = append(out, s[:2]...) - s = s[2:] - continue - } - v, err := strconv.ParseUint(s[2:4], 16, 8) - if err != nil { - out = append(out, s[:4]...) - } else { - out = append(out, byte(v)) - } - s = s[4:] - } else if '0' <= s[1] && s[1] <= '7' { - // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" - // so consume up to 2 more bytes or up to end-of-string - n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) - if n > 3 { - n = 3 - } - v, err := strconv.ParseUint(s[1:1+n], 8, 8) - if err != nil { - out = append(out, s[:1+n]...) - } else { - out = append(out, byte(v)) - } - s = s[1+n:] - } else { - // bad escape, just propagate the slash as-is - out = append(out, s[0]) - s = s[1:] - } - } - - return string(out) -} - -func (g *Generator) generateExtension(ext *ExtensionDescriptor) { - ccTypeName := ext.DescName() - - extObj := g.ObjectNamed(*ext.Extendee) - var extDesc *Descriptor - if id, ok := extObj.(*ImportedDescriptor); ok { - // This is extending a publicly imported message. - // We need the underlying type for goTag. 
- extDesc = id.o.(*Descriptor) - } else { - extDesc = extObj.(*Descriptor) - } - extendedType := "*" + g.TypeName(extObj) // always use the original - field := ext.FieldDescriptorProto - fieldType, wireType := g.GoType(ext.parent, field) - tag := g.goTag(extDesc, field, wireType) - g.RecordTypeUse(*ext.Extendee) - if n := ext.FieldDescriptorProto.TypeName; n != nil { - // foreign extension type - g.RecordTypeUse(*n) - } - - typeName := ext.TypeName() - - // Special case for proto2 message sets: If this extension is extending - // proto2.bridge.MessageSet, and its final name component is "message_set_extension", - // then drop that last component. - // - // TODO: This should be implemented in the text formatter rather than the generator. - // In addition, the situation for when to apply this special case is implemented - // differently in other languages: - // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 - mset := false - if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { - typeName = typeName[:len(typeName)-1] - mset = true - } - - // For text formatting, the package must be exactly what the .proto file declares, - // ignoring overrides such as the go_package option, and with no dot/underscore mapping. - extName := strings.Join(typeName, ".") - if g.file.Package != nil { - extName = *g.file.Package + "." + extName - } - - g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") - g.P("ExtendedType: (", extendedType, ")(nil),") - g.P("ExtensionType: (", fieldType, ")(nil),") - g.P("Field: ", field.Number, ",") - g.P(`Name: "`, extName, `",`) - g.P("Tag: ", tag, ",") - g.P(`Filename: "`, g.file.GetName(), `",`) - - g.P("}") - g.P() - - if mset { - // Generate a bit more code to register with message_set.go. 
- g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName) - } - - g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) -} - -func (g *Generator) generateInitFunction() { - for _, enum := range g.file.enum { - g.generateEnumRegistration(enum) - } - for _, d := range g.file.desc { - for _, ext := range d.ext { - g.generateExtensionRegistration(ext) - } - } - for _, ext := range g.file.ext { - g.generateExtensionRegistration(ext) - } - if len(g.init) == 0 { - return - } - g.P("func init() {") - for _, l := range g.init { - g.P(l) - } - g.P("}") - g.init = nil -} - -func (g *Generator) generateFileDescriptor(file *FileDescriptor) { - // Make a copy and trim source_code_info data. - // TODO: Trim this more when we know exactly what we need. - pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) - pb.SourceCodeInfo = nil - - b, err := proto.Marshal(pb) - if err != nil { - g.Fail(err.Error()) - } - - var buf bytes.Buffer - w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) - w.Write(b) - w.Close() - b = buf.Bytes() - - v := file.VarName() - g.P() - g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") - g.P("var ", v, " = []byte{") - g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") - for len(b) > 0 { - n := 16 - if n > len(b) { - n = len(b) - } - - s := "" - for _, c := range b[:n] { - s += fmt.Sprintf("0x%02x,", c) - } - g.P(s) - - b = b[n:] - } - g.P("}") -} - -func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { - // // We always print the full (proto-world) package name here. - pkg := enum.File().GetPackage() - if pkg != "" { - pkg += "." - } - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. 
- ccTypeName := CamelCaseSlice(typeName) - g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) -} - -func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { - g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) -} - -// And now lots of helper functions. - -// Is c an ASCII lower-case letter? -func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} - -// Is c an ASCII digit? -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} - -// CamelCase returns the CamelCased name. -// If there is an interior underscore followed by a lower case letter, -// drop the underscore and convert the letter to upper case. -// There is a remote possibility of this rewrite causing a name collision, -// but it's so remote we're prepared to pretend it's nonexistent - since the -// C++ generator lowercases names, it's extremely unlikely to have two fields -// with different capitalizations. -// In short, _my_field_name_2 becomes XMyFieldName_2. -func CamelCase(s string) string { - if s == "" { - return "" - } - t := make([]byte, 0, 32) - i := 0 - if s[0] == '_' { - // Need a capital letter; drop the '_'. - t = append(t, 'X') - i++ - } - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // That is, we process a word at a time, where words are marked by _ or - // upper case letter. Digits are treated as words. - for ; i < len(s); i++ { - c := s[i] - if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { - continue // Skip the underscore in s. - } - if isASCIIDigit(c) { - t = append(t, c) - continue - } - // Assume we have a letter now - if not, it's a bogus identifier. - // The next word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c ^= ' ' // Make it a capital letter. - } - t = append(t, c) // Guaranteed not lower case. - // Accept lower case sequence that follows. 
- for i+1 < len(s) && isASCIILower(s[i+1]) { - i++ - t = append(t, s[i]) - } - } - return string(t) -} - -// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to -// be joined with "_". -func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } - -// dottedSlice turns a sliced name into a dotted name. -func dottedSlice(elem []string) string { return strings.Join(elem, ".") } - -// Is this field optional? -func isOptional(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL -} - -// Is this field required? -func isRequired(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED -} - -// Is this field repeated? -func isRepeated(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED -} - -// Is this field a scalar numeric type? 
-func isScalar(field *descriptor.FieldDescriptorProto) bool { - if field.Type == nil { - return false - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE, - descriptor.FieldDescriptorProto_TYPE_FLOAT, - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_BOOL, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} - -// badToUnderscore is the mapping function used to generate Go names from package names, -// which can be dotted in the input .proto file. It replaces non-identifier characters such as -// dot or dash with underscore. -func badToUnderscore(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { - return r - } - return '_' -} - -// baseName returns the last path element of the name, with the last dotted suffix removed. -func baseName(name string) string { - // First, find the last element - if i := strings.LastIndex(name, "/"); i >= 0 { - name = name[i+1:] - } - // Now drop the suffix - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[0:i] - } - return name -} - -// The SourceCodeInfo message describes the location of elements of a parsed -// .proto file by way of a "path", which is a sequence of integers that -// describe the route from a FileDescriptorProto to the relevant submessage. -// The path alternates between a field number of a repeated field, and an index -// into that repeated field. The constants below define the field numbers that -// are used. 
-// -// See descriptor.proto for more information about this. -const ( - // tag numbers in FileDescriptorProto - packagePath = 2 // package - messagePath = 4 // message_type - enumPath = 5 // enum_type - // tag numbers in DescriptorProto - messageFieldPath = 2 // field - messageMessagePath = 3 // nested_type - messageEnumPath = 4 // enum_type - messageOneofPath = 8 // oneof_decl - // tag numbers in EnumDescriptorProto - enumValuePath = 2 // value -) - -var supportTypeAliases bool - -func init() { - for _, tag := range build.Default.ReleaseTags { - if tag == "go1.9" { - supportTypeAliases = true - return - } - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go deleted file mode 100644 index a9b61036..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go +++ /dev/null @@ -1,117 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package remap handles tracking the locations of Go tokens in a source text -across a rewrite by the Go formatter. -*/ -package remap - -import ( - "fmt" - "go/scanner" - "go/token" -) - -// A Location represents a span of byte offsets in the source text. -type Location struct { - Pos, End int // End is exclusive -} - -// A Map represents a mapping between token locations in an input source text -// and locations in the correspnding output text. -type Map map[Location]Location - -// Find reports whether the specified span is recorded by m, and if so returns -// the new location it was mapped to. If the input span was not found, the -// returned location is the same as the input. -func (m Map) Find(pos, end int) (Location, bool) { - key := Location{ - Pos: pos, - End: end, - } - if loc, ok := m[key]; ok { - return loc, true - } - return key, false -} - -func (m Map) add(opos, oend, npos, nend int) { - m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} -} - -// Compute constructs a location mapping from input to output. An error is -// reported if any of the tokens of output cannot be mapped. 
-func Compute(input, output []byte) (Map, error) { - itok := tokenize(input) - otok := tokenize(output) - if len(itok) != len(otok) { - return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) - } - m := make(Map) - for i, ti := range itok { - to := otok[i] - if ti.Token != to.Token { - return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) - } - m.add(ti.pos, ti.end, to.pos, to.end) - } - return m, nil -} - -// tokinfo records the span and type of a source token. -type tokinfo struct { - pos, end int - token.Token -} - -func tokenize(src []byte) []tokinfo { - fs := token.NewFileSet() - var s scanner.Scanner - s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) - var info []tokinfo - for { - pos, next, lit := s.Scan() - switch next { - case token.SEMICOLON: - continue - } - info = append(info, tokinfo{ - pos: int(pos - 1), - end: int(pos + token.Pos(len(lit)) - 1), - Token: next, - }) - if next == token.EOF { - break - } - } - return info -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go deleted file mode 100644 index ccc7fca0..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package remap - -import ( - "go/format" - "testing" -) - -func TestErrors(t *testing.T) { - tests := []struct { - in, out string - }{ - {"", "x"}, - {"x", ""}, - {"var x int = 5\n", "var x = 5\n"}, - {"these are \"one\" thing", "those are 'another' thing"}, - } - for _, test := range tests { - m, err := Compute([]byte(test.in), []byte(test.out)) - if err != nil { - t.Logf("Got expected error: %v", err) - continue - } - t.Errorf("Compute(%q, %q): got %+v, wanted error", test.in, test.out, m) - } -} - -func TestMatching(t *testing.T) { - // The input is a source text that will be rearranged by the formatter. 
- const input = `package foo -var s int -func main(){} -` - - output, err := format.Source([]byte(input)) - if err != nil { - t.Fatalf("Formatting failed: %v", err) - } - m, err := Compute([]byte(input), output) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - // Verify that the mapped locations have the same text. - for key, val := range m { - want := input[key.Pos:key.End] - got := string(output[val.Pos:val.End]) - if got != want { - t.Errorf("Token at %d:%d: got %q, want %q", key.Pos, key.End, got, want) - } - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go deleted file mode 100644 index 571147cf..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2013 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package generator - -import ( - "testing" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -func TestCamelCase(t *testing.T) { - tests := []struct { - in, want string - }{ - {"one", "One"}, - {"one_two", "OneTwo"}, - {"_my_field_name_2", "XMyFieldName_2"}, - {"Something_Capped", "Something_Capped"}, - {"my_Name", "My_Name"}, - {"OneTwo", "OneTwo"}, - {"_", "X"}, - {"_a_", "XA_"}, - } - for _, tc := range tests { - if got := CamelCase(tc.in); got != tc.want { - t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want) - } - } -} - -func TestGoPackageOption(t *testing.T) { - tests := []struct { - in string - impPath GoImportPath - pkg GoPackageName - ok bool - }{ - {"", "", "", false}, - {"foo", "", "foo", true}, - {"github.com/golang/bar", "github.com/golang/bar", "bar", true}, - {"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true}, - } - for _, tc := range tests { - d := &FileDescriptor{ - FileDescriptorProto: &descriptor.FileDescriptorProto{ - Options: &descriptor.FileOptions{ - GoPackage: &tc.in, - }, - }, - } - impPath, pkg, ok := d.goPackageOption() - if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok { - t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in, - impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok) - } - } -} - -func TestUnescape(t *testing.T) { - tests := []struct { - in string - out string - }{ - // successful cases, including all kinds of escapes - {"", ""}, - 
{"foo bar baz frob nitz", "foo bar baz frob nitz"}, - {`\000\001\002\003\004\005\006\007`, string([]byte{0, 1, 2, 3, 4, 5, 6, 7})}, - {`\a\b\f\n\r\t\v\\\?\'\"`, string([]byte{'\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\', '?', '\'', '"'})}, - {`\x10\x20\x30\x40\x50\x60\x70\x80`, string([]byte{16, 32, 48, 64, 80, 96, 112, 128})}, - // variable length octal escapes - {`\0\018\222\377\3\04\005\6\07`, string([]byte{0, 1, '8', 0222, 255, 3, 4, 5, 6, 7})}, - // malformed escape sequences left as is - {"foo \\g bar", "foo \\g bar"}, - {"foo \\xg0 bar", "foo \\xg0 bar"}, - {"\\", "\\"}, - {"\\x", "\\x"}, - {"\\xf", "\\xf"}, - {"\\777", "\\777"}, // overflows byte - } - for _, tc := range tests { - s := unescape(tc.in) - if s != tc.out { - t.Errorf("doUnescape(%q) = %q; should have been %q", tc.in, s, tc.out) - } - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go deleted file mode 100644 index 2630de68..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go +++ /dev/null @@ -1,422 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/build" - "go/parser" - "go/token" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strings" - "testing" -) - -// Set --regenerate to regenerate the golden files. -var regenerate = flag.Bool("regenerate", false, "regenerate golden files") - -// When the environment variable RUN_AS_PROTOC_GEN_GO is set, we skip running -// tests and instead act as protoc-gen-go. This allows the test binary to -// pass itself to protoc. -func init() { - if os.Getenv("RUN_AS_PROTOC_GEN_GO") != "" { - main() - os.Exit(0) - } -} - -func TestGolden(t *testing.T) { - workdir, err := ioutil.TempDir("", "proto-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(workdir) - - // Find all the proto files we need to compile. We assume that each directory - // contains the files for a single package. 
- supportTypeAliases := hasReleaseTag("go1.9") - packages := map[string][]string{} - err = filepath.Walk("testdata", func(path string, info os.FileInfo, err error) error { - if filepath.Base(path) == "import_public" && !supportTypeAliases { - // Public imports require type alias support. - return filepath.SkipDir - } - if !strings.HasSuffix(path, ".proto") { - return nil - } - dir := filepath.Dir(path) - packages[dir] = append(packages[dir], path) - return nil - }) - if err != nil { - t.Fatal(err) - } - - // Compile each package, using this binary as protoc-gen-go. - for _, sources := range packages { - args := []string{"-Itestdata", "--go_out=plugins=grpc,paths=source_relative:" + workdir} - args = append(args, sources...) - protoc(t, args) - } - - // Compare each generated file to the golden version. - filepath.Walk(workdir, func(genPath string, info os.FileInfo, _ error) error { - if info.IsDir() { - return nil - } - - // For each generated file, figure out the path to the corresponding - // golden file in the testdata directory. - relPath, err := filepath.Rel(workdir, genPath) - if err != nil { - t.Errorf("filepath.Rel(%q, %q): %v", workdir, genPath, err) - return nil - } - if filepath.SplitList(relPath)[0] == ".." { - t.Errorf("generated file %q is not relative to %q", genPath, workdir) - } - goldenPath := filepath.Join("testdata", relPath) - - got, err := ioutil.ReadFile(genPath) - if err != nil { - t.Error(err) - return nil - } - if *regenerate { - // If --regenerate set, just rewrite the golden files. 
- err := ioutil.WriteFile(goldenPath, got, 0666) - if err != nil { - t.Error(err) - } - return nil - } - - want, err := ioutil.ReadFile(goldenPath) - if err != nil { - t.Error(err) - return nil - } - - want = fdescRE.ReplaceAll(want, nil) - got = fdescRE.ReplaceAll(got, nil) - if bytes.Equal(got, want) { - return nil - } - - cmd := exec.Command("diff", "-u", goldenPath, genPath) - out, _ := cmd.CombinedOutput() - t.Errorf("golden file differs: %v\n%v", relPath, string(out)) - return nil - }) -} - -var fdescRE = regexp.MustCompile(`(?ms)^var fileDescriptor.*}`) - -// Source files used by TestParameters. -const ( - aProto = ` -syntax = "proto3"; -package test.alpha; -option go_package = "package/alpha"; -import "beta/b.proto"; -message M { test.beta.M field = 1; }` - - bProto = ` -syntax = "proto3"; -package test.beta; -// no go_package option -message M {}` -) - -func TestParameters(t *testing.T) { - for _, test := range []struct { - parameters string - wantFiles map[string]bool - wantImportsA map[string]bool - wantPackageA string - wantPackageB string - }{{ - parameters: "", - wantFiles: map[string]bool{ - "package/alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - wantPackageA: "alpha", - wantPackageB: "test_beta", - wantImportsA: map[string]bool{ - "github.com/golang/protobuf/proto": true, - "beta": true, - }, - }, { - parameters: "import_prefix=prefix", - wantFiles: map[string]bool{ - "package/alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - wantPackageA: "alpha", - wantPackageB: "test_beta", - wantImportsA: map[string]bool{ - // This really doesn't seem like useful behavior. - "prefixgithub.com/golang/protobuf/proto": true, - "prefixbeta": true, - }, - }, { - // import_path only affects the 'package' line. 
- parameters: "import_path=import/path/of/pkg", - wantPackageA: "alpha", - wantPackageB: "pkg", - wantFiles: map[string]bool{ - "package/alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - }, { - parameters: "Mbeta/b.proto=package/gamma", - wantFiles: map[string]bool{ - "package/alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - wantPackageA: "alpha", - wantPackageB: "test_beta", - wantImportsA: map[string]bool{ - "github.com/golang/protobuf/proto": true, - // Rewritten by the M parameter. - "package/gamma": true, - }, - }, { - parameters: "import_prefix=prefix,Mbeta/b.proto=package/gamma", - wantFiles: map[string]bool{ - "package/alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - wantPackageA: "alpha", - wantPackageB: "test_beta", - wantImportsA: map[string]bool{ - // import_prefix applies after M. - "prefixpackage/gamma": true, - }, - }, { - parameters: "paths=source_relative", - wantFiles: map[string]bool{ - "alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - wantPackageA: "alpha", - wantPackageB: "test_beta", - }, { - parameters: "paths=source_relative,import_prefix=prefix", - wantFiles: map[string]bool{ - // import_prefix doesn't affect filenames. - "alpha/a.pb.go": true, - "beta/b.pb.go": true, - }, - wantPackageA: "alpha", - wantPackageB: "test_beta", - }} { - name := test.parameters - if name == "" { - name = "defaults" - } - // TODO: Switch to t.Run when we no longer support Go 1.6. 
- t.Logf("TEST: %v", name) - workdir, err := ioutil.TempDir("", "proto-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(workdir) - - for _, dir := range []string{"alpha", "beta", "out"} { - if err := os.MkdirAll(filepath.Join(workdir, dir), 0777); err != nil { - t.Fatal(err) - } - } - - if err := ioutil.WriteFile(filepath.Join(workdir, "alpha", "a.proto"), []byte(aProto), 0666); err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(workdir, "beta", "b.proto"), []byte(bProto), 0666); err != nil { - t.Fatal(err) - } - - protoc(t, []string{ - "-I" + workdir, - "--go_out=" + test.parameters + ":" + filepath.Join(workdir, "out"), - filepath.Join(workdir, "alpha", "a.proto"), - }) - protoc(t, []string{ - "-I" + workdir, - "--go_out=" + test.parameters + ":" + filepath.Join(workdir, "out"), - filepath.Join(workdir, "beta", "b.proto"), - }) - - contents := make(map[string]string) - gotFiles := make(map[string]bool) - outdir := filepath.Join(workdir, "out") - filepath.Walk(outdir, func(p string, info os.FileInfo, _ error) error { - if info.IsDir() { - return nil - } - base := filepath.Base(p) - if base == "a.pb.go" || base == "b.pb.go" { - b, err := ioutil.ReadFile(p) - if err != nil { - t.Fatal(err) - } - contents[base] = string(b) - } - relPath, _ := filepath.Rel(outdir, p) - gotFiles[relPath] = true - return nil - }) - for got := range gotFiles { - if runtime.GOOS == "windows" { - got = filepath.ToSlash(got) - } - if !test.wantFiles[got] { - t.Errorf("unexpected output file: %v", got) - } - } - for want := range test.wantFiles { - if runtime.GOOS == "windows" { - want = filepath.FromSlash(want) - } - if !gotFiles[want] { - t.Errorf("missing output file: %v", want) - } - } - gotPackageA, gotImports, err := parseFile(contents["a.pb.go"]) - if err != nil { - t.Fatal(err) - } - gotPackageB, _, err := parseFile(contents["b.pb.go"]) - if err != nil { - t.Fatal(err) - } - if got, want := gotPackageA, test.wantPackageA; want != got { - 
t.Errorf("output file a.pb.go is package %q, want %q", got, want) - } - if got, want := gotPackageB, test.wantPackageB; want != got { - t.Errorf("output file b.pb.go is package %q, want %q", got, want) - } - missingImport := false - WantImport: - for want := range test.wantImportsA { - for _, imp := range gotImports { - if `"`+want+`"` == imp { - continue WantImport - } - } - t.Errorf("output file a.pb.go does not contain expected import %q", want) - missingImport = true - } - if missingImport { - t.Error("got imports:") - for _, imp := range gotImports { - t.Errorf(" %v", imp) - } - } - } -} - -func TestPackageComment(t *testing.T) { - workdir, err := ioutil.TempDir("", "proto-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(workdir) - - var packageRE = regexp.MustCompile(`(?m)^package .*`) - - for i, test := range []struct { - goPackageOption string - wantPackage string - }{{ - goPackageOption: ``, - wantPackage: `package proto_package`, - }, { - goPackageOption: `option go_package = "go_package";`, - wantPackage: `package go_package`, - }, { - goPackageOption: `option go_package = "import/path/of/go_package";`, - wantPackage: `package go_package // import "import/path/of/go_package"`, - }, { - goPackageOption: `option go_package = "import/path/of/something;go_package";`, - wantPackage: `package go_package // import "import/path/of/something"`, - }, { - goPackageOption: `option go_package = "import_path;go_package";`, - wantPackage: `package go_package // import "import_path"`, - }} { - srcName := filepath.Join(workdir, fmt.Sprintf("%d.proto", i)) - tgtName := filepath.Join(workdir, fmt.Sprintf("%d.pb.go", i)) - - buf := &bytes.Buffer{} - fmt.Fprintln(buf, `syntax = "proto3";`) - fmt.Fprintln(buf, `package proto_package;`) - fmt.Fprintln(buf, test.goPackageOption) - if err := ioutil.WriteFile(srcName, buf.Bytes(), 0666); err != nil { - t.Fatal(err) - } - - protoc(t, []string{"-I" + workdir, "--go_out=paths=source_relative:" + workdir, srcName}) - 
- out, err := ioutil.ReadFile(tgtName) - if err != nil { - t.Fatal(err) - } - - pkg := packageRE.Find(out) - if pkg == nil { - t.Errorf("generated .pb.go contains no package line\n\nsource:\n%v\n\noutput:\n%v", buf.String(), string(out)) - continue - } - - if got, want := string(pkg), test.wantPackage; got != want { - t.Errorf("unexpected package statement with go_package = %q\n got: %v\nwant: %v", test.goPackageOption, got, want) - } - } -} - -// parseFile returns a file's package name and a list of all packages it imports. -func parseFile(source string) (packageName string, imports []string, err error) { - fset := token.NewFileSet() - f, err := parser.ParseFile(fset, "", source, parser.ImportsOnly) - if err != nil { - return "", nil, err - } - for _, imp := range f.Imports { - imports = append(imports, imp.Path.Value) - } - return f.Name.Name, imports, nil -} - -func protoc(t *testing.T, args []string) { - cmd := exec.Command("protoc", "--plugin=protoc-gen-go="+os.Args[0]) - cmd.Args = append(cmd.Args, args...) - // We set the RUN_AS_PROTOC_GEN_GO environment variable to indicate that - // the subprocess should act as a proto compiler rather than a test. 
- cmd.Env = append(os.Environ(), "RUN_AS_PROTOC_GEN_GO=1") - out, err := cmd.CombinedOutput() - if len(out) > 0 || err != nil { - t.Log("RUNNING: ", strings.Join(cmd.Args, " ")) - } - if len(out) > 0 { - t.Log(string(out)) - } - if err != nil { - t.Fatalf("protoc: %v", err) - } -} - -func hasReleaseTag(want string) bool { - for _, tag := range build.Default.ReleaseTags { - if tag == want { - return true - } - } - return false -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go deleted file mode 100644 index faef1abb..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package grpc outputs gRPC service descriptions in Go code. -// It runs as a plugin for the Go protocol buffer compiler plugin. -// It is linked in to protoc-gen-go. -package grpc - -import ( - "fmt" - "path" - "strconv" - "strings" - - pb "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/golang/protobuf/protoc-gen-go/generator" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// the grpc package is introduced; the generated code references -// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 4 - -// Paths for packages used by code generated in this file, -// relative to the import_prefix of the generator.Generator. -const ( - contextPkgPath = "golang.org/x/net/context" - grpcPkgPath = "google.golang.org/grpc" -) - -func init() { - generator.RegisterPlugin(new(grpc)) -} - -// grpc is an implementation of the Go protocol buffer compiler's -// plugin architecture. It generates bindings for gRPC support. -type grpc struct { - gen *generator.Generator -} - -// Name returns the name of this plugin, "grpc". -func (g *grpc) Name() string { - return "grpc" -} - -// The names for packages imported in the generated code. -// They may vary from the final path component of the import path -// if the name is used by other packages. 
-var ( - contextPkg string - grpcPkg string -) - -// Init initializes the plugin. -func (g *grpc) Init(gen *generator.Generator) { - g.gen = gen - contextPkg = generator.RegisterUniquePackageName("context", nil) - grpcPkg = generator.RegisterUniquePackageName("grpc", nil) -} - -// Given a type name defined in a .proto, return its object. -// Also record that we're using it, to guarantee the associated import. -func (g *grpc) objectNamed(name string) generator.Object { - g.gen.RecordTypeUse(name) - return g.gen.ObjectNamed(name) -} - -// Given a type name defined in a .proto, return its name as we will print it. -func (g *grpc) typeName(str string) string { - return g.gen.TypeName(g.objectNamed(str)) -} - -// P forwards to g.gen.P. -func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } - -// Generate generates code for the services in the given file. -func (g *grpc) Generate(file *generator.FileDescriptor) { - if len(file.FileDescriptorProto.Service) == 0 { - return - } - - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ ", contextPkg, ".Context") - g.P("var _ ", grpcPkg, ".ClientConn") - g.P() - - // Assert version compatibility. - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the grpc package it is being compiled against.") - g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) - g.P() - - for i, service := range file.FileDescriptorProto.Service { - g.generateService(file, service, i) - } -} - -// GenerateImports generates the import declaration for this file. 
-func (g *grpc) GenerateImports(file *generator.FileDescriptor) { - if len(file.FileDescriptorProto.Service) == 0 { - return - } - g.P("import (") - g.P(contextPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), contextPkgPath))) - g.P(grpcPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), grpcPkgPath))) - g.P(")") - g.P() -} - -// reservedClientName records whether a client name is reserved on the client side. -var reservedClientName = map[string]bool{ - // TODO: do we need any in gRPC? -} - -func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// generateService generates all the code for the named service. -func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { - path := fmt.Sprintf("6,%d", index) // 6 means service. - - origServName := service.GetName() - fullServName := origServName - if pkg := file.GetPackage(); pkg != "" { - fullServName = pkg + "." + fullServName - } - servName := generator.CamelCase(origServName) - deprecated := service.GetOptions().GetDeprecated() - - g.P() - g.P(fmt.Sprintf(`// %sClient is the client API for %s service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName)) - - // Client interface. - if deprecated { - g.P("//") - g.P(deprecationComment) - } - g.P("type ", servName, "Client interface {") - for i, method := range service.Method { - g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. - g.P(g.generateClientSignature(servName, method)) - } - g.P("}") - g.P() - - // Client structure. 
- g.P("type ", unexport(servName), "Client struct {") - g.P("cc *", grpcPkg, ".ClientConn") - g.P("}") - g.P() - - // NewClient factory. - if deprecated { - g.P(deprecationComment) - } - g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") - g.P("return &", unexport(servName), "Client{cc}") - g.P("}") - g.P() - - var methodIndex, streamIndex int - serviceDescVar := "_" + servName + "_serviceDesc" - // Client method implementations. - for _, method := range service.Method { - var descExpr string - if !method.GetServerStreaming() && !method.GetClientStreaming() { - // Unary RPC method - descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) - methodIndex++ - } else { - // Streaming RPC method - descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) - streamIndex++ - } - g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) - } - - // Server interface. - serverType := servName + "Server" - g.P("// ", serverType, " is the server API for ", servName, " service.") - if deprecated { - g.P("//") - g.P(deprecationComment) - } - g.P("type ", serverType, " interface {") - for i, method := range service.Method { - g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. - g.P(g.generateServerSignature(servName, method)) - } - g.P("}") - g.P() - - // Server registration. - if deprecated { - g.P(deprecationComment) - } - g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") - g.P("s.RegisterService(&", serviceDescVar, `, srv)`) - g.P("}") - g.P() - - // Server handler implementations. - var handlerNames []string - for _, method := range service.Method { - hname := g.generateServerMethod(servName, fullServName, method) - handlerNames = append(handlerNames, hname) - } - - // Service descriptor. 
- g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") - g.P("ServiceName: ", strconv.Quote(fullServName), ",") - g.P("HandlerType: (*", serverType, ")(nil),") - g.P("Methods: []", grpcPkg, ".MethodDesc{") - for i, method := range service.Method { - if method.GetServerStreaming() || method.GetClientStreaming() { - continue - } - g.P("{") - g.P("MethodName: ", strconv.Quote(method.GetName()), ",") - g.P("Handler: ", handlerNames[i], ",") - g.P("},") - } - g.P("},") - g.P("Streams: []", grpcPkg, ".StreamDesc{") - for i, method := range service.Method { - if !method.GetServerStreaming() && !method.GetClientStreaming() { - continue - } - g.P("{") - g.P("StreamName: ", strconv.Quote(method.GetName()), ",") - g.P("Handler: ", handlerNames[i], ",") - if method.GetServerStreaming() { - g.P("ServerStreams: true,") - } - if method.GetClientStreaming() { - g.P("ClientStreams: true,") - } - g.P("},") - } - g.P("},") - g.P("Metadata: \"", file.GetName(), "\",") - g.P("}") - g.P() -} - -// generateClientSignature returns the client-side signature for a method. 
-func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { - origMethName := method.GetName() - methName := generator.CamelCase(origMethName) - if reservedClientName[methName] { - methName += "_" - } - reqArg := ", in *" + g.typeName(method.GetInputType()) - if method.GetClientStreaming() { - reqArg = "" - } - respName := "*" + g.typeName(method.GetOutputType()) - if method.GetServerStreaming() || method.GetClientStreaming() { - respName = servName + "_" + generator.CamelCase(origMethName) + "Client" - } - return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) -} - -func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { - sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) - methName := generator.CamelCase(method.GetName()) - inType := g.typeName(method.GetInputType()) - outType := g.typeName(method.GetOutputType()) - - if method.GetOptions().GetDeprecated() { - g.P(deprecationComment) - } - g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") - if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("out := new(", outType, ")") - // TODO: Pass descExpr to Invoke. 
- g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) - g.P("if err != nil { return nil, err }") - g.P("return out, nil") - g.P("}") - g.P() - return - } - streamType := unexport(servName) + methName + "Client" - g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`) - g.P("if err != nil { return nil, err }") - g.P("x := &", streamType, "{stream}") - if !method.GetClientStreaming() { - g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") - g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") - } - g.P("return x, nil") - g.P("}") - g.P() - - genSend := method.GetClientStreaming() - genRecv := method.GetServerStreaming() - genCloseAndRecv := !method.GetServerStreaming() - - // Stream auxiliary types and methods. - g.P("type ", servName, "_", methName, "Client interface {") - if genSend { - g.P("Send(*", inType, ") error") - } - if genRecv { - g.P("Recv() (*", outType, ", error)") - } - if genCloseAndRecv { - g.P("CloseAndRecv() (*", outType, ", error)") - } - g.P(grpcPkg, ".ClientStream") - g.P("}") - g.P() - - g.P("type ", streamType, " struct {") - g.P(grpcPkg, ".ClientStream") - g.P("}") - g.P() - - if genSend { - g.P("func (x *", streamType, ") Send(m *", inType, ") error {") - g.P("return x.ClientStream.SendMsg(m)") - g.P("}") - g.P() - } - if genRecv { - g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") - g.P("m := new(", outType, ")") - g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } - if genCloseAndRecv { - g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") - g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") - g.P("m := new(", outType, ")") - g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } -} - -// generateServerSignature returns the server-side signature for a 
method. -func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { - origMethName := method.GetName() - methName := generator.CamelCase(origMethName) - if reservedClientName[methName] { - methName += "_" - } - - var reqArgs []string - ret := "error" - if !method.GetServerStreaming() && !method.GetClientStreaming() { - reqArgs = append(reqArgs, contextPkg+".Context") - ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" - } - if !method.GetClientStreaming() { - reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) - } - if method.GetServerStreaming() || method.GetClientStreaming() { - reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") - } - - return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret -} - -func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { - methName := generator.CamelCase(method.GetName()) - hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) - inType := g.typeName(method.GetInputType()) - outType := g.typeName(method.GetOutputType()) - - if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") - g.P("in := new(", inType, ")") - g.P("if err := dec(in); err != nil { return nil, err }") - g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") - g.P("info := &", grpcPkg, ".UnaryServerInfo{") - g.P("Server: srv,") - g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") - g.P("}") - g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") - g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") - g.P("}") - g.P("return interceptor(ctx, in, info, handler)") - g.P("}") - g.P() - 
return hname - } - streamType := unexport(servName) + methName + "Server" - g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") - if !method.GetClientStreaming() { - g.P("m := new(", inType, ")") - g.P("if err := stream.RecvMsg(m); err != nil { return err }") - g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") - } else { - g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") - } - g.P("}") - g.P() - - genSend := method.GetServerStreaming() - genSendAndClose := !method.GetServerStreaming() - genRecv := method.GetClientStreaming() - - // Stream auxiliary types and methods. - g.P("type ", servName, "_", methName, "Server interface {") - if genSend { - g.P("Send(*", outType, ") error") - } - if genSendAndClose { - g.P("SendAndClose(*", outType, ") error") - } - if genRecv { - g.P("Recv() (*", inType, ", error)") - } - g.P(grpcPkg, ".ServerStream") - g.P("}") - g.P() - - g.P("type ", streamType, " struct {") - g.P(grpcPkg, ".ServerStream") - g.P("}") - g.P() - - if genSend { - g.P("func (x *", streamType, ") Send(m *", outType, ") error {") - g.P("return x.ServerStream.SendMsg(m)") - g.P("}") - g.P() - } - if genSendAndClose { - g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") - g.P("return x.ServerStream.SendMsg(m)") - g.P("}") - g.P() - } - if genRecv { - g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") - g.P("m := new(", inType, ")") - g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } - - return hname -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go deleted file mode 100644 index 532a5500..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// 
Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package main - -import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go deleted file mode 100644 index 8e2486de..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go +++ /dev/null @@ -1,98 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate -// Go code. Run it by building this program and putting it in your path with -// the name -// protoc-gen-go -// That word 'go' at the end becomes part of the option string set for the -// protocol compiler, so once the protocol compiler (protoc) is installed -// you can run -// protoc --go_out=output_directory input_directory/file.proto -// to generate Go bindings for the protocol defined by file.proto. -// With that input, the output will be written to -// output_directory/file.pb.go -// -// The generated code is documented in the package comment for -// the library. -// -// See the README and documentation for protocol buffers to learn more: -// https://developers.google.com/protocol-buffers/ -package main - -import ( - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator" -) - -func main() { - // Begin by allocating a generator. The request and response structures are stored there - // so we can do error handling easily - the response structure contains the field to - // report failure. 
- g := generator.New() - - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - g.Error(err, "reading input") - } - - if err := proto.Unmarshal(data, g.Request); err != nil { - g.Error(err, "parsing input proto") - } - - if len(g.Request.FileToGenerate) == 0 { - g.Fail("no files to generate") - } - - g.CommandLineParameters(g.Request.GetParameter()) - - // Create a wrapped version of the Descriptors and EnumDescriptors that - // point to the file that defines them. - g.WrapTypes() - - g.SetPackageNames() - g.BuildTypeNameMap() - - g.GenerateAllFiles() - - // Send back the results. - data, err = proto.Marshal(g.Response) - if err != nil { - g.Error(err, "failed to marshal output proto") - } - _, err = os.Stdout.Write(data) - if err != nil { - g.Error(err, "failed to write output proto") - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go deleted file mode 100644 index 61bfc10e..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ /dev/null @@ -1,369 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/compiler/plugin.proto - -/* -Package plugin_go is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/compiler/plugin.proto - -It has these top-level messages: - Version - CodeGeneratorRequest - CodeGeneratorResponse -*/ -package plugin_go - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of protocol compiler. -type Version struct { - Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Version) Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetMajor() int32 { - if m != nil && m.Major != nil { - return *m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil && m.Minor != nil { - return *m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil && m.Patch != nil { - return *m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil && m.Suffix != nil 
{ - return *m.Suffix - } - return "" -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -type CodeGeneratorRequest struct { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` - // The generator parameter passed on the command-line. - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - // The version number of protocol compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } -func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorRequest) ProtoMessage() {} -func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) -} -func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) -} -func (m *CodeGeneratorRequest) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorRequest.Size(m) -} -func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo - -func (m *CodeGeneratorRequest) GetFileToGenerate() []string { - if m != nil { - return m.FileToGenerate - } - return nil -} - -func (m *CodeGeneratorRequest) GetParameter() string { - if m != nil && m.Parameter != nil { - return *m.Parameter - } - return "" -} - -func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { - if m != nil { - return m.ProtoFile - } - return nil -} - -func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -type CodeGeneratorResponse struct { - // Error message. If non-empty, code generation failed. 
The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } -func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse) ProtoMessage() {} -func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) -} -func (m *CodeGeneratorResponse) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse.Size(m) -} -func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo - -func (m *CodeGeneratorResponse) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { - if m != nil { - return m.File - } 
- return nil -} - -// Represents a single generated file. -type CodeGeneratorResponse_File struct { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. 
- // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` - // The file contents. 
- Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } -func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} -func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } -func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) -} -func (m *CodeGeneratorResponse_File) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) -} -func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo - -func (m *CodeGeneratorResponse_File) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { - if m != nil && m.InsertionPoint != nil { - return *m.InsertionPoint - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetContent() string { - if m != nil && m.Content != nil { - return *m.Content - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") - proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") - proto.RegisterType((*CodeGeneratorResponse)(nil), 
"google.protobuf.compiler.CodeGeneratorResponse") - proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") -} - -func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, - 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, - 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, - 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, - 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, - 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, - 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, - 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, - 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, - 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, - 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, - 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, - 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, - 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, - 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, - 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, - 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 
0xbc, 0xda, - 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, - 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, - 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, - 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, - 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, - 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, - 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, - 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, - 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden deleted file mode 100644 index 8953d0ff..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/protobuf/compiler/plugin.proto -// DO NOT EDIT! - -package google_protobuf_compiler - -import proto "github.com/golang/protobuf/proto" -import "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference proto and math imports to suppress error if they are not otherwise used. 
-var _ = proto.GetString -var _ = math.Inf - -type CodeGeneratorRequest struct { - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } -func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorRequest) ProtoMessage() {} - -func (this *CodeGeneratorRequest) GetParameter() string { - if this != nil && this.Parameter != nil { - return *this.Parameter - } - return "" -} - -type CodeGeneratorResponse struct { - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } -func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse) ProtoMessage() {} - -func (this *CodeGeneratorResponse) GetError() string { - if this != nil && this.Error != nil { - return *this.Error - } - return "" -} - -type CodeGeneratorResponse_File struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } -func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse_File) ProtoMessage() 
{} - -func (this *CodeGeneratorResponse_File) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { - if this != nil && this.InsertionPoint != nil { - return *this.InsertionPoint - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetContent() string { - if this != nil && this.Content != nil { - return *this.Content - } - return "" -} - -func init() { -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto deleted file mode 100644 index 5b557452..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto +++ /dev/null @@ -1,167 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// -// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to -// change. -// -// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is -// just a program that reads a CodeGeneratorRequest from stdin and writes a -// CodeGeneratorResponse to stdout. -// -// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead -// of dealing with the raw protocol defined here. -// -// A plugin executable needs only to be placed somewhere in the path. The -// plugin should be named "protoc-gen-$NAME", and will then be used when the -// flag "--${NAME}_out" is passed to protoc. - -syntax = "proto2"; -package google.protobuf.compiler; -option java_package = "com.google.protobuf.compiler"; -option java_outer_classname = "PluginProtos"; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; - -import "google/protobuf/descriptor.proto"; - -// The version number of protocol compiler. -message Version { - optional int32 major = 1; - optional int32 minor = 2; - optional int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - optional string suffix = 4; -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -message CodeGeneratorRequest { - // The .proto files that were explicitly listed on the command-line. 
The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - repeated string file_to_generate = 1; - - // The generator parameter passed on the command-line. - optional string parameter = 2; - - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - repeated FileDescriptorProto proto_file = 15; - - // The version number of protocol compiler. - optional Version compiler_version = 3; - -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -message CodeGeneratorResponse { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - optional string error = 1; - - // Represents a single generated file. - message File { - // The file name, relative to the output directory. 
The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - optional string name = 1; - - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. 
Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - optional string insertion_point = 2; - - // The file contents. - optional string content = 15; - } - repeated File file = 15; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go deleted file mode 100644 index 63e4e137..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go +++ /dev/null @@ -1,234 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// deprecated/deprecated.proto is a deprecated file. - -package deprecated // import "github.com/golang/protobuf/protoc-gen-go/testdata/deprecated" - -/* -package deprecated contains only deprecated messages and services. -*/ - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// DeprecatedEnum contains deprecated values. -type DeprecatedEnum int32 // Deprecated: Do not use. -const ( - // DEPRECATED is the iota value of this enum. - DeprecatedEnum_DEPRECATED DeprecatedEnum = 0 // Deprecated: Do not use. -) - -var DeprecatedEnum_name = map[int32]string{ - 0: "DEPRECATED", -} -var DeprecatedEnum_value = map[string]int32{ - "DEPRECATED": 0, -} - -func (x DeprecatedEnum) String() string { - return proto.EnumName(DeprecatedEnum_name, int32(x)) -} -func (DeprecatedEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_deprecated_9e1889ba21817fad, []int{0} -} - -// DeprecatedRequest is a request to DeprecatedCall. -// -// Deprecated: Do not use. 
-type DeprecatedRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeprecatedRequest) Reset() { *m = DeprecatedRequest{} } -func (m *DeprecatedRequest) String() string { return proto.CompactTextString(m) } -func (*DeprecatedRequest) ProtoMessage() {} -func (*DeprecatedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_deprecated_9e1889ba21817fad, []int{0} -} -func (m *DeprecatedRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeprecatedRequest.Unmarshal(m, b) -} -func (m *DeprecatedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeprecatedRequest.Marshal(b, m, deterministic) -} -func (dst *DeprecatedRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeprecatedRequest.Merge(dst, src) -} -func (m *DeprecatedRequest) XXX_Size() int { - return xxx_messageInfo_DeprecatedRequest.Size(m) -} -func (m *DeprecatedRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeprecatedRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeprecatedRequest proto.InternalMessageInfo - -// Deprecated: Do not use. -type DeprecatedResponse struct { - // DeprecatedField contains a DeprecatedEnum. - DeprecatedField DeprecatedEnum `protobuf:"varint,1,opt,name=deprecated_field,json=deprecatedField,proto3,enum=deprecated.DeprecatedEnum" json:"deprecated_field,omitempty"` // Deprecated: Do not use. 
- XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeprecatedResponse) Reset() { *m = DeprecatedResponse{} } -func (m *DeprecatedResponse) String() string { return proto.CompactTextString(m) } -func (*DeprecatedResponse) ProtoMessage() {} -func (*DeprecatedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_deprecated_9e1889ba21817fad, []int{1} -} -func (m *DeprecatedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeprecatedResponse.Unmarshal(m, b) -} -func (m *DeprecatedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeprecatedResponse.Marshal(b, m, deterministic) -} -func (dst *DeprecatedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeprecatedResponse.Merge(dst, src) -} -func (m *DeprecatedResponse) XXX_Size() int { - return xxx_messageInfo_DeprecatedResponse.Size(m) -} -func (m *DeprecatedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeprecatedResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeprecatedResponse proto.InternalMessageInfo - -// Deprecated: Do not use. -func (m *DeprecatedResponse) GetDeprecatedField() DeprecatedEnum { - if m != nil { - return m.DeprecatedField - } - return DeprecatedEnum_DEPRECATED -} - -func init() { - proto.RegisterType((*DeprecatedRequest)(nil), "deprecated.DeprecatedRequest") - proto.RegisterType((*DeprecatedResponse)(nil), "deprecated.DeprecatedResponse") - proto.RegisterEnum("deprecated.DeprecatedEnum", DeprecatedEnum_name, DeprecatedEnum_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DeprecatedServiceClient is the client API for DeprecatedService service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -// -// Deprecated: Do not use. -type DeprecatedServiceClient interface { - // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. - DeprecatedCall(ctx context.Context, in *DeprecatedRequest, opts ...grpc.CallOption) (*DeprecatedResponse, error) -} - -type deprecatedServiceClient struct { - cc *grpc.ClientConn -} - -// Deprecated: Do not use. -func NewDeprecatedServiceClient(cc *grpc.ClientConn) DeprecatedServiceClient { - return &deprecatedServiceClient{cc} -} - -// Deprecated: Do not use. -func (c *deprecatedServiceClient) DeprecatedCall(ctx context.Context, in *DeprecatedRequest, opts ...grpc.CallOption) (*DeprecatedResponse, error) { - out := new(DeprecatedResponse) - err := c.cc.Invoke(ctx, "/deprecated.DeprecatedService/DeprecatedCall", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DeprecatedServiceServer is the server API for DeprecatedService service. -// -// Deprecated: Do not use. -type DeprecatedServiceServer interface { - // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. - DeprecatedCall(context.Context, *DeprecatedRequest) (*DeprecatedResponse, error) -} - -// Deprecated: Do not use. 
-func RegisterDeprecatedServiceServer(s *grpc.Server, srv DeprecatedServiceServer) { - s.RegisterService(&_DeprecatedService_serviceDesc, srv) -} - -func _DeprecatedService_DeprecatedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeprecatedRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DeprecatedServiceServer).DeprecatedCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/deprecated.DeprecatedService/DeprecatedCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DeprecatedServiceServer).DeprecatedCall(ctx, req.(*DeprecatedRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _DeprecatedService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "deprecated.DeprecatedService", - HandlerType: (*DeprecatedServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "DeprecatedCall", - Handler: _DeprecatedService_DeprecatedCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "deprecated/deprecated.proto", -} - -func init() { - proto.RegisterFile("deprecated/deprecated.proto", fileDescriptor_deprecated_9e1889ba21817fad) -} - -var fileDescriptor_deprecated_9e1889ba21817fad = []byte{ - // 248 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x49, 0x2d, 0x28, - 0x4a, 0x4d, 0x4e, 0x2c, 0x49, 0x4d, 0xd1, 0x47, 0x30, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, - 0xb8, 0x10, 0x22, 0x4a, 0xe2, 0x5c, 0x82, 0x2e, 0x70, 0x5e, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, - 0x89, 0x15, 0x93, 0x04, 0xa3, 0x52, 0x32, 0x97, 0x10, 0xb2, 0x44, 0x71, 0x41, 0x7e, 0x5e, 0x71, - 0xaa, 0x90, 0x27, 0x97, 0x00, 0x42, 0x73, 0x7c, 0x5a, 0x66, 0x6a, 0x4e, 0x8a, 0x04, 0xa3, 0x02, - 0xa3, 0x06, 0x9f, 0x91, 0x94, 0x1e, 0x92, 0x3d, 0x08, 0x9d, 0xae, 0x79, 0xa5, 0xb9, 
0x4e, 0x4c, - 0x12, 0x8c, 0x41, 0xfc, 0x08, 0x69, 0x37, 0x90, 0x36, 0x90, 0x25, 0x5a, 0x1a, 0x5c, 0x7c, 0xa8, - 0x4a, 0x85, 0x84, 0xb8, 0xb8, 0x5c, 0x5c, 0x03, 0x82, 0x5c, 0x9d, 0x1d, 0x43, 0x5c, 0x5d, 0x04, - 0x18, 0xa4, 0x98, 0x38, 0x18, 0xa5, 0x98, 0x24, 0x18, 0x8d, 0xf2, 0x90, 0xdd, 0x19, 0x9c, 0x5a, - 0x54, 0x96, 0x99, 0x9c, 0x2a, 0x14, 0x82, 0xac, 0xdd, 0x39, 0x31, 0x27, 0x47, 0x48, 0x16, 0xbb, - 0x2b, 0xa0, 0x1e, 0x93, 0x92, 0xc3, 0x25, 0x0d, 0xf1, 0x9e, 0x12, 0x73, 0x07, 0x13, 0xa3, 0x14, - 0x88, 0x70, 0x72, 0x8c, 0xb2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, - 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, 0x07, 0x5f, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, - 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x5f, 0x92, 0x5a, 0x5c, 0x92, 0x92, 0x58, 0x92, 0x88, - 0x14, 0xd2, 0x3b, 0x18, 0x19, 0x93, 0xd8, 0xc0, 0xaa, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x0e, 0xf5, 0x6c, 0x87, 0x8c, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto deleted file mode 100644 index b314166d..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto +++ /dev/null @@ -1,69 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -// package deprecated contains only deprecated messages and services. -package deprecated; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/deprecated"; - -option deprecated = true; // file-level deprecation - -// DeprecatedRequest is a request to DeprecatedCall. -message DeprecatedRequest { - option deprecated = true; -} - -message DeprecatedResponse { - // comment for DeprecatedResponse is omitted to guarantee deprecation - // message doesn't append unnecessary comments. - option deprecated = true; - // DeprecatedField contains a DeprecatedEnum. - DeprecatedEnum deprecated_field = 1 [deprecated=true]; -} - -// DeprecatedEnum contains deprecated values. -enum DeprecatedEnum { - option deprecated = true; - // DEPRECATED is the iota value of this enum. 
- DEPRECATED = 0 [deprecated=true]; -} - -// DeprecatedService is for making DeprecatedCalls -service DeprecatedService { - option deprecated = true; - - // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. - rpc DeprecatedCall(DeprecatedRequest) returns (DeprecatedResponse) { - option deprecated = true; - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go deleted file mode 100644 index a08e8eda..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go +++ /dev/null @@ -1,139 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: extension_base/extension_base.proto - -package extension_base // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type BaseMessage struct { - Height *int32 `protobuf:"varint,1,opt,name=height" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BaseMessage) Reset() { *m = BaseMessage{} } -func (m *BaseMessage) String() string { return proto.CompactTextString(m) } -func (*BaseMessage) ProtoMessage() {} -func (*BaseMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_base_41d3c712c9fc37fc, []int{0} -} - -var extRange_BaseMessage = []proto.ExtensionRange{ - {Start: 4, End: 9}, - {Start: 16, End: 536870911}, -} - -func (*BaseMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_BaseMessage -} -func (m *BaseMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BaseMessage.Unmarshal(m, b) -} -func (m *BaseMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BaseMessage.Marshal(b, m, deterministic) -} -func (dst *BaseMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_BaseMessage.Merge(dst, src) -} -func (m *BaseMessage) XXX_Size() int { - return xxx_messageInfo_BaseMessage.Size(m) -} -func (m *BaseMessage) XXX_DiscardUnknown() { - xxx_messageInfo_BaseMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_BaseMessage proto.InternalMessageInfo - -func (m *BaseMessage) GetHeight() int32 { - if m != nil && m.Height != nil { - return *m.Height - } - return 0 -} - -// Another message that may be extended, using message_set_wire_format. 
-type OldStyleMessage struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OldStyleMessage) Reset() { *m = OldStyleMessage{} } -func (m *OldStyleMessage) String() string { return proto.CompactTextString(m) } -func (*OldStyleMessage) ProtoMessage() {} -func (*OldStyleMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_base_41d3c712c9fc37fc, []int{1} -} - -func (m *OldStyleMessage) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) -} -func (m *OldStyleMessage) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) -} - -var extRange_OldStyleMessage = []proto.ExtensionRange{ - {Start: 100, End: 2147483646}, -} - -func (*OldStyleMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OldStyleMessage -} -func (m *OldStyleMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OldStyleMessage.Unmarshal(m, b) -} -func (m *OldStyleMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OldStyleMessage.Marshal(b, m, deterministic) -} -func (dst *OldStyleMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_OldStyleMessage.Merge(dst, src) -} -func (m *OldStyleMessage) XXX_Size() int { - return xxx_messageInfo_OldStyleMessage.Size(m) -} -func (m *OldStyleMessage) XXX_DiscardUnknown() { - xxx_messageInfo_OldStyleMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_OldStyleMessage proto.InternalMessageInfo - -func init() { - proto.RegisterType((*BaseMessage)(nil), "extension_base.BaseMessage") - proto.RegisterType((*OldStyleMessage)(nil), "extension_base.OldStyleMessage") -} - -func init() { - proto.RegisterFile("extension_base/extension_base.proto", fileDescriptor_extension_base_41d3c712c9fc37fc) -} - -var 
fileDescriptor_extension_base_41d3c712c9fc37fc = []byte{ - // 179 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xad, 0x28, 0x49, - 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x47, 0xe5, 0xea, 0x15, 0x14, - 0xe5, 0x97, 0xe4, 0x0b, 0xf1, 0xa1, 0x8a, 0x2a, 0x99, 0x72, 0x71, 0x3b, 0x25, 0x16, 0xa7, 0xfa, - 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x89, 0x71, 0xb1, 0x65, 0xa4, 0x66, 0xa6, 0x67, 0x94, - 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x06, 0x41, 0x79, 0x5a, 0x2c, 0x1c, 0x2c, 0x02, 0x5c, 0x5a, - 0x1c, 0x1c, 0x02, 0x02, 0x0d, 0x0d, 0x0d, 0x0d, 0x4c, 0x4a, 0xf2, 0x5c, 0xfc, 0xfe, 0x39, 0x29, - 0xc1, 0x25, 0x95, 0x39, 0x30, 0xad, 0x5a, 0x1c, 0x1c, 0x29, 0x02, 0xff, 0xff, 0xff, 0xff, 0xcf, - 0x6e, 0xc5, 0xc4, 0xc1, 0xe8, 0xe4, 0x14, 0xe5, 0x90, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, - 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x0f, 0x76, 0x42, 0x52, 0x69, 0x1a, - 0x84, 0x91, 0xac, 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x5f, 0x92, 0x5a, 0x5c, 0x92, 0x92, - 0x58, 0x92, 0x88, 0xe6, 0x62, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x7f, 0xb7, 0x2a, 0xd1, - 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto deleted file mode 100644 index 0ba74def..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; - -package extension_base; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base"; - -message BaseMessage { - optional int32 height = 1; - extensions 4 to 9; - extensions 16 to max; -} - -// Another message that may be extended, using message_set_wire_format. 
-message OldStyleMessage { - option message_set_wire_format = true; - extensions 100 to max; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go deleted file mode 100644 index b3732169..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: extension_extra/extension_extra.proto - -package extension_extra // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ExtraMessage struct { - Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtraMessage) Reset() { *m = ExtraMessage{} } -func (m *ExtraMessage) String() string { return proto.CompactTextString(m) } -func (*ExtraMessage) ProtoMessage() {} -func (*ExtraMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_extra_83adf2410f49f816, []int{0} -} -func (m *ExtraMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtraMessage.Unmarshal(m, b) -} -func (m *ExtraMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtraMessage.Marshal(b, m, deterministic) -} -func (dst *ExtraMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtraMessage.Merge(dst, src) -} -func (m *ExtraMessage) XXX_Size() int { - return xxx_messageInfo_ExtraMessage.Size(m) -} -func (m *ExtraMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ExtraMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtraMessage proto.InternalMessageInfo - -func (m *ExtraMessage) GetWidth() int32 { - if m != nil && m.Width != nil { - return *m.Width - } - return 0 -} - -func init() { - proto.RegisterType((*ExtraMessage)(nil), "extension_extra.ExtraMessage") -} - -func init() { - proto.RegisterFile("extension_extra/extension_extra.proto", fileDescriptor_extension_extra_83adf2410f49f816) -} - -var fileDescriptor_extension_extra_83adf2410f49f816 = []byte{ - // 133 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xad, 0x28, 0x49, - 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0xad, 0x28, 0x29, 0x4a, 0xd4, 0x47, 0xe3, 0xeb, 0x15, - 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xf1, 0xa3, 0x09, 0x2b, 0xa9, 0x70, 0xf1, 0xb8, 0x82, 0x18, 0xbe, - 0xa9, 0xc5, 0xc5, 0x89, 
0xe9, 0xa9, 0x42, 0x22, 0x5c, 0xac, 0xe5, 0x99, 0x29, 0x25, 0x19, 0x12, - 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x93, 0x73, 0x94, 0x63, 0x7a, 0x66, 0x49, 0x46, - 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0xd8, 0xc4, - 0xa4, 0xd2, 0x34, 0x08, 0x23, 0x59, 0x37, 0x3d, 0x35, 0x4f, 0x37, 0x3d, 0x5f, 0xbf, 0x24, 0xb5, - 0xb8, 0x24, 0x25, 0xb1, 0x04, 0xc3, 0x05, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xec, 0xe3, - 0xb7, 0xa3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto deleted file mode 100644 index 1dd03e70..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto +++ /dev/null @@ -1,40 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; - -package extension_extra; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra"; - -message ExtraMessage { - optional int32 width = 1; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go deleted file mode 100644 index 05247299..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Test that we can use protocol buffers that use extensions. - -package testdata - -import ( - "bytes" - "regexp" - "testing" - - "github.com/golang/protobuf/proto" - base "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" - user "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user" -) - -func TestSingleFieldExtension(t *testing.T) { - bm := &base.BaseMessage{ - Height: proto.Int32(178), - } - - // Use extension within scope of another type. 
- vol := proto.Uint32(11) - err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol) - if err != nil { - t.Fatal("Failed setting extension:", err) - } - buf, err := proto.Marshal(bm) - if err != nil { - t.Fatal("Failed encoding message with extension:", err) - } - bm_new := new(base.BaseMessage) - if err := proto.Unmarshal(buf, bm_new); err != nil { - t.Fatal("Failed decoding message with extension:", err) - } - if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { - t.Fatal("Decoded message didn't contain extension.") - } - vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume) - if err != nil { - t.Fatal("Failed getting extension:", err) - } - if v := vol_out.(*uint32); *v != *vol { - t.Errorf("vol_out = %v, expected %v", *v, *vol) - } - proto.ClearExtension(bm_new, user.E_LoudMessage_Volume) - if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { - t.Fatal("Failed clearing extension.") - } -} - -func TestMessageExtension(t *testing.T) { - bm := &base.BaseMessage{ - Height: proto.Int32(179), - } - - // Use extension that is itself a message. 
- um := &user.UserMessage{ - Name: proto.String("Dave"), - Rank: proto.String("Major"), - } - err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um) - if err != nil { - t.Fatal("Failed setting extension:", err) - } - buf, err := proto.Marshal(bm) - if err != nil { - t.Fatal("Failed encoding message with extension:", err) - } - bm_new := new(base.BaseMessage) - if err := proto.Unmarshal(buf, bm_new); err != nil { - t.Fatal("Failed decoding message with extension:", err) - } - if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { - t.Fatal("Decoded message didn't contain extension.") - } - um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage) - if err != nil { - t.Fatal("Failed getting extension:", err) - } - if n := um_out.(*user.UserMessage).Name; *n != *um.Name { - t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name) - } - if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank { - t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank) - } - proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage) - if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { - t.Fatal("Failed clearing extension.") - } -} - -func TestTopLevelExtension(t *testing.T) { - bm := &base.BaseMessage{ - Height: proto.Int32(179), - } - - width := proto.Int32(17) - err := proto.SetExtension(bm, user.E_Width, width) - if err != nil { - t.Fatal("Failed setting extension:", err) - } - buf, err := proto.Marshal(bm) - if err != nil { - t.Fatal("Failed encoding message with extension:", err) - } - bm_new := new(base.BaseMessage) - if err := proto.Unmarshal(buf, bm_new); err != nil { - t.Fatal("Failed decoding message with extension:", err) - } - if !proto.HasExtension(bm_new, user.E_Width) { - t.Fatal("Decoded message didn't contain extension.") - } - width_out, err := proto.GetExtension(bm_new, user.E_Width) - if err != nil { - t.Fatal("Failed getting extension:", err) - } - if w := width_out.(*int32); *w != *width { - t.Errorf("width_out 
= %v, expected %v", *w, *width) - } - proto.ClearExtension(bm_new, user.E_Width) - if proto.HasExtension(bm_new, user.E_Width) { - t.Fatal("Failed clearing extension.") - } -} - -func TestMessageSetWireFormat(t *testing.T) { - osm := new(base.OldStyleMessage) - osp := &user.OldStyleParcel{ - Name: proto.String("Dave"), - Height: proto.Int32(178), - } - - err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp) - if err != nil { - t.Fatal("Failed setting extension:", err) - } - - buf, err := proto.Marshal(osm) - if err != nil { - t.Fatal("Failed encoding message:", err) - } - - // Data generated from Python implementation. - expected := []byte{ - 11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12, - } - - if !bytes.Equal(expected, buf) { - t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf) - } - - // Check that it is restored correctly. - osm = new(base.OldStyleMessage) - if err := proto.Unmarshal(buf, osm); err != nil { - t.Fatal("Failed decoding message:", err) - } - osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension) - if err != nil { - t.Fatal("Failed getting extension:", err) - } - osp = osp_out.(*user.OldStyleParcel) - if *osp.Name != "Dave" || *osp.Height != 178 { - t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp) - } -} - -func main() { - // simpler than rigging up gotest - testing.Main(regexp.MatchString, []testing.InternalTest{ - {"TestSingleFieldExtension", TestSingleFieldExtension}, - {"TestMessageExtension", TestMessageExtension}, - {"TestTopLevelExtension", TestTopLevelExtension}, - }, - []testing.InternalBenchmark{}, - []testing.InternalExample{}) -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go deleted file mode 100644 index c7187921..00000000 --- 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go +++ /dev/null @@ -1,401 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: extension_user/extension_user.proto - -package extension_user // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import extension_base "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" -import extension_extra "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type UserMessage struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Rank *string `protobuf:"bytes,2,opt,name=rank" json:"rank,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserMessage) Reset() { *m = UserMessage{} } -func (m *UserMessage) String() string { return proto.CompactTextString(m) } -func (*UserMessage) ProtoMessage() {} -func (*UserMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{0} -} -func (m *UserMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserMessage.Unmarshal(m, b) -} -func (m *UserMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserMessage.Marshal(b, m, deterministic) -} -func (dst *UserMessage) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_UserMessage.Merge(dst, src) -} -func (m *UserMessage) XXX_Size() int { - return xxx_messageInfo_UserMessage.Size(m) -} -func (m *UserMessage) XXX_DiscardUnknown() { - xxx_messageInfo_UserMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_UserMessage proto.InternalMessageInfo - -func (m *UserMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *UserMessage) GetRank() string { - if m != nil && m.Rank != nil { - return *m.Rank - } - return "" -} - -// Extend inside the scope of another type -type LoudMessage struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LoudMessage) Reset() { *m = LoudMessage{} } -func (m *LoudMessage) String() string { return proto.CompactTextString(m) } -func (*LoudMessage) ProtoMessage() {} -func (*LoudMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{1} -} - -var extRange_LoudMessage = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*LoudMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_LoudMessage -} -func (m *LoudMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LoudMessage.Unmarshal(m, b) -} -func (m *LoudMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LoudMessage.Marshal(b, m, deterministic) -} -func (dst *LoudMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoudMessage.Merge(dst, src) -} -func (m *LoudMessage) XXX_Size() int { - return xxx_messageInfo_LoudMessage.Size(m) -} -func (m *LoudMessage) XXX_DiscardUnknown() { - xxx_messageInfo_LoudMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_LoudMessage proto.InternalMessageInfo - -var E_LoudMessage_Volume = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 8, - 
Name: "extension_user.LoudMessage.volume", - Tag: "varint,8,opt,name=volume", - Filename: "extension_user/extension_user.proto", -} - -// Extend inside the scope of another type, using a message. -type LoginMessage struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LoginMessage) Reset() { *m = LoginMessage{} } -func (m *LoginMessage) String() string { return proto.CompactTextString(m) } -func (*LoginMessage) ProtoMessage() {} -func (*LoginMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{2} -} -func (m *LoginMessage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LoginMessage.Unmarshal(m, b) -} -func (m *LoginMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LoginMessage.Marshal(b, m, deterministic) -} -func (dst *LoginMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoginMessage.Merge(dst, src) -} -func (m *LoginMessage) XXX_Size() int { - return xxx_messageInfo_LoginMessage.Size(m) -} -func (m *LoginMessage) XXX_DiscardUnknown() { - xxx_messageInfo_LoginMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_LoginMessage proto.InternalMessageInfo - -var E_LoginMessage_UserMessage = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: (*UserMessage)(nil), - Field: 16, - Name: "extension_user.LoginMessage.user_message", - Tag: "bytes,16,opt,name=user_message,json=userMessage", - Filename: "extension_user/extension_user.proto", -} - -type Detail struct { - Color *string `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Detail) Reset() { *m = Detail{} } -func (m *Detail) String() string { return proto.CompactTextString(m) } -func (*Detail) ProtoMessage() {} -func (*Detail) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{3} -} -func (m *Detail) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Detail.Unmarshal(m, b) -} -func (m *Detail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Detail.Marshal(b, m, deterministic) -} -func (dst *Detail) XXX_Merge(src proto.Message) { - xxx_messageInfo_Detail.Merge(dst, src) -} -func (m *Detail) XXX_Size() int { - return xxx_messageInfo_Detail.Size(m) -} -func (m *Detail) XXX_DiscardUnknown() { - xxx_messageInfo_Detail.DiscardUnknown(m) -} - -var xxx_messageInfo_Detail proto.InternalMessageInfo - -func (m *Detail) GetColor() string { - if m != nil && m.Color != nil { - return *m.Color - } - return "" -} - -// An extension of an extension -type Announcement struct { - Words *string `protobuf:"bytes,1,opt,name=words" json:"words,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Announcement) Reset() { *m = Announcement{} } -func (m *Announcement) String() string { return proto.CompactTextString(m) } -func (*Announcement) ProtoMessage() {} -func (*Announcement) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{4} -} -func (m *Announcement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Announcement.Unmarshal(m, b) -} -func (m *Announcement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Announcement.Marshal(b, m, deterministic) -} -func (dst *Announcement) XXX_Merge(src proto.Message) { - xxx_messageInfo_Announcement.Merge(dst, src) -} -func (m *Announcement) XXX_Size() int { - return xxx_messageInfo_Announcement.Size(m) -} -func (m *Announcement) XXX_DiscardUnknown() { - xxx_messageInfo_Announcement.DiscardUnknown(m) -} - -var xxx_messageInfo_Announcement proto.InternalMessageInfo - -func (m *Announcement) GetWords() string { - if m != nil && m.Words != nil { - 
return *m.Words - } - return "" -} - -var E_Announcement_LoudExt = &proto.ExtensionDesc{ - ExtendedType: (*LoudMessage)(nil), - ExtensionType: (*Announcement)(nil), - Field: 100, - Name: "extension_user.Announcement.loud_ext", - Tag: "bytes,100,opt,name=loud_ext,json=loudExt", - Filename: "extension_user/extension_user.proto", -} - -// Something that can be put in a message set. -type OldStyleParcel struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OldStyleParcel) Reset() { *m = OldStyleParcel{} } -func (m *OldStyleParcel) String() string { return proto.CompactTextString(m) } -func (*OldStyleParcel) ProtoMessage() {} -func (*OldStyleParcel) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{5} -} -func (m *OldStyleParcel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OldStyleParcel.Unmarshal(m, b) -} -func (m *OldStyleParcel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OldStyleParcel.Marshal(b, m, deterministic) -} -func (dst *OldStyleParcel) XXX_Merge(src proto.Message) { - xxx_messageInfo_OldStyleParcel.Merge(dst, src) -} -func (m *OldStyleParcel) XXX_Size() int { - return xxx_messageInfo_OldStyleParcel.Size(m) -} -func (m *OldStyleParcel) XXX_DiscardUnknown() { - xxx_messageInfo_OldStyleParcel.DiscardUnknown(m) -} - -var xxx_messageInfo_OldStyleParcel proto.InternalMessageInfo - -func (m *OldStyleParcel) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OldStyleParcel) GetHeight() int32 { - if m != nil && m.Height != nil { - return *m.Height - } - return 0 -} - -var E_OldStyleParcel_MessageSetExtension = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.OldStyleMessage)(nil), - 
ExtensionType: (*OldStyleParcel)(nil), - Field: 2001, - Name: "extension_user.OldStyleParcel", - Tag: "bytes,2001,opt,name=message_set_extension,json=messageSetExtension", - Filename: "extension_user/extension_user.proto", -} - -var E_UserMessage = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: (*UserMessage)(nil), - Field: 5, - Name: "extension_user.user_message", - Tag: "bytes,5,opt,name=user_message,json=userMessage", - Filename: "extension_user/extension_user.proto", -} - -var E_ExtraMessage = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: (*extension_extra.ExtraMessage)(nil), - Field: 9, - Name: "extension_user.extra_message", - Tag: "bytes,9,opt,name=extra_message,json=extraMessage", - Filename: "extension_user/extension_user.proto", -} - -var E_Width = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 6, - Name: "extension_user.width", - Tag: "varint,6,opt,name=width", - Filename: "extension_user/extension_user.proto", -} - -var E_Area = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 7, - Name: "extension_user.area", - Tag: "varint,7,opt,name=area", - Filename: "extension_user/extension_user.proto", -} - -var E_Detail = &proto.ExtensionDesc{ - ExtendedType: (*extension_base.BaseMessage)(nil), - ExtensionType: ([]*Detail)(nil), - Field: 17, - Name: "extension_user.detail", - Tag: "bytes,17,rep,name=detail", - Filename: "extension_user/extension_user.proto", -} - -func init() { - proto.RegisterType((*UserMessage)(nil), "extension_user.UserMessage") - proto.RegisterType((*LoudMessage)(nil), "extension_user.LoudMessage") - proto.RegisterType((*LoginMessage)(nil), "extension_user.LoginMessage") - proto.RegisterType((*Detail)(nil), "extension_user.Detail") - proto.RegisterType((*Announcement)(nil), "extension_user.Announcement") - 
proto.RegisterMessageSetType((*OldStyleParcel)(nil), 2001, "extension_user.OldStyleParcel") - proto.RegisterType((*OldStyleParcel)(nil), "extension_user.OldStyleParcel") - proto.RegisterExtension(E_LoudMessage_Volume) - proto.RegisterExtension(E_LoginMessage_UserMessage) - proto.RegisterExtension(E_Announcement_LoudExt) - proto.RegisterExtension(E_OldStyleParcel_MessageSetExtension) - proto.RegisterExtension(E_UserMessage) - proto.RegisterExtension(E_ExtraMessage) - proto.RegisterExtension(E_Width) - proto.RegisterExtension(E_Area) - proto.RegisterExtension(E_Detail) -} - -func init() { - proto.RegisterFile("extension_user/extension_user.proto", fileDescriptor_extension_user_af41b5e0bdfb7846) -} - -var fileDescriptor_extension_user_af41b5e0bdfb7846 = []byte{ - // 492 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x6f, 0x94, 0x40, - 0x10, 0x0e, 0x6d, 0x8f, 0x5e, 0x87, 0x6b, 0xad, 0xa8, 0xcd, 0xa5, 0x6a, 0x25, 0x18, 0x13, 0x62, - 0xd2, 0x23, 0x62, 0x7c, 0xe1, 0x49, 0x2f, 0xde, 0x93, 0x67, 0x34, 0x54, 0x5f, 0xf4, 0x81, 0xec, - 0xc1, 0xc8, 0x91, 0xc2, 0xae, 0xd9, 0x5d, 0xec, 0xe9, 0xd3, 0xfd, 0x26, 0xff, 0x89, 0xff, 0xc8, - 0xb0, 0x2c, 0x2d, 0x87, 0xc9, 0xc5, 0xbe, 0x90, 0xfd, 0x86, 0x6f, 0xbe, 0x99, 0xfd, 0x66, 0x00, - 0x9e, 0xe2, 0x4a, 0x22, 0x15, 0x39, 0xa3, 0x71, 0x25, 0x90, 0xfb, 0x9b, 0x70, 0xf2, 0x9d, 0x33, - 0xc9, 0xec, 0xa3, 0xcd, 0xe8, 0x69, 0x27, 0x69, 0x41, 0x04, 0xfa, 0x9b, 0xb0, 0x49, 0x3a, 0x7d, - 0x76, 0x13, 0xc5, 0x95, 0xe4, 0xc4, 0xef, 0xe1, 0x86, 0xe6, 0xbe, 0x02, 0xeb, 0xb3, 0x40, 0xfe, - 0x1e, 0x85, 0x20, 0x19, 0xda, 0x36, 0xec, 0x51, 0x52, 0xe2, 0xd8, 0x70, 0x0c, 0xef, 0x20, 0x52, - 0xe7, 0x3a, 0xc6, 0x09, 0xbd, 0x1c, 0xef, 0x34, 0xb1, 0xfa, 0xec, 0xce, 0xc1, 0x9a, 0xb3, 0x2a, - 0xd5, 0x69, 0xcf, 0x87, 0xc3, 0xf4, 0x78, 0xbd, 0x5e, 0xaf, 0x77, 0x82, 0x97, 0x60, 0xfe, 0x60, - 0x45, 0x55, 0xa2, 0xfd, 0x70, 0xd2, 0xeb, 0x6b, 0x4a, 0x04, 0xea, 0x84, 0xf1, 0xd0, 0x31, 0xbc, - 
0xc3, 0x48, 0x53, 0xdd, 0x4b, 0x18, 0xcd, 0x59, 0x96, 0x53, 0xfd, 0x36, 0xf8, 0x0a, 0xa3, 0xfa, - 0xa2, 0x71, 0xa9, 0xbb, 0xda, 0x2a, 0x75, 0xec, 0x18, 0x9e, 0x15, 0x74, 0x29, 0xca, 0xba, 0xce, - 0xad, 0x22, 0xab, 0xba, 0x01, 0xee, 0x19, 0x98, 0x6f, 0x51, 0x92, 0xbc, 0xb0, 0xef, 0xc3, 0x20, - 0x61, 0x05, 0xe3, 0xfa, 0xb6, 0x0d, 0x70, 0x7f, 0xc1, 0xe8, 0x0d, 0xa5, 0xac, 0xa2, 0x09, 0x96, - 0x48, 0x65, 0xcd, 0xba, 0x62, 0x3c, 0x15, 0x2d, 0x4b, 0x81, 0xe0, 0x13, 0x0c, 0x0b, 0x56, 0xa5, - 0xb5, 0x97, 0xf6, 0x3f, 0xb5, 0x3b, 0xd6, 0x8c, 0x53, 0xd5, 0xde, 0xa3, 0x3e, 0xa5, 0x5b, 0x22, - 0xda, 0xaf, 0xa5, 0x66, 0x2b, 0xe9, 0xfe, 0x36, 0xe0, 0xe8, 0x43, 0x91, 0x5e, 0xc8, 0x9f, 0x05, - 0x7e, 0x24, 0x3c, 0xc1, 0xa2, 0x33, 0x91, 0x9d, 0xeb, 0x89, 0x9c, 0x80, 0xb9, 0xc4, 0x3c, 0x5b, - 0x4a, 0x35, 0x93, 0x41, 0xa4, 0x51, 0x20, 0xe1, 0x81, 0xb6, 0x2c, 0x16, 0x28, 0xe3, 0xeb, 0x92, - 0xf6, 0x93, 0xbe, 0x81, 0x6d, 0x91, 0xb6, 0xcb, 0x3f, 0x77, 0x54, 0x9b, 0x67, 0xfd, 0x36, 0x37, - 0x9b, 0x89, 0xee, 0x69, 0xf9, 0x0b, 0x94, 0xb3, 0x96, 0x18, 0xde, 0x6a, 0x5a, 0x83, 0xdb, 0x4d, - 0x2b, 0x8c, 0xe1, 0x50, 0xad, 0xeb, 0xff, 0xa9, 0x1f, 0x28, 0xf5, 0xc7, 0x93, 0xfe, 0xae, 0xcf, - 0xea, 0x67, 0xab, 0x3f, 0xc2, 0x0e, 0x0a, 0x5f, 0xc0, 0xe0, 0x2a, 0x4f, 0xe5, 0x72, 0xbb, 0xb0, - 0xa9, 0x7c, 0x6e, 0x98, 0xa1, 0x0f, 0x7b, 0x84, 0x23, 0xd9, 0x9e, 0xb1, 0xef, 0x18, 0xde, 0x6e, - 0xa4, 0x88, 0xe1, 0x3b, 0x30, 0xd3, 0x66, 0xe5, 0xb6, 0xa6, 0xdc, 0x75, 0x76, 0x3d, 0x2b, 0x38, - 0xe9, 0x7b, 0xd3, 0x6c, 0x6b, 0xa4, 0x25, 0xa6, 0xd3, 0x2f, 0xaf, 0xb3, 0x5c, 0x2e, 0xab, 0xc5, - 0x24, 0x61, 0xa5, 0x9f, 0xb1, 0x82, 0xd0, 0xcc, 0x57, 0x1f, 0xf3, 0xa2, 0xfa, 0xd6, 0x1c, 0x92, - 0xf3, 0x0c, 0xe9, 0x79, 0xc6, 0x7c, 0x89, 0x42, 0xa6, 0x44, 0x92, 0xde, 0x7f, 0xe5, 0x6f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0xdf, 0x18, 0x64, 0x15, 0x77, 0x04, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto 
b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto deleted file mode 100644 index 033c186c..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto2"; - -import "extension_base/extension_base.proto"; -import "extension_extra/extension_extra.proto"; - -package extension_user; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user"; - -message UserMessage { - optional string name = 1; - optional string rank = 2; -} - -// Extend with a message -extend extension_base.BaseMessage { - optional UserMessage user_message = 5; -} - -// Extend with a foreign message -extend extension_base.BaseMessage { - optional extension_extra.ExtraMessage extra_message = 9; -} - -// Extend with some primitive types -extend extension_base.BaseMessage { - optional int32 width = 6; - optional int64 area = 7; -} - -// Extend inside the scope of another type -message LoudMessage { - extend extension_base.BaseMessage { - optional uint32 volume = 8; - } - extensions 100 to max; -} - -// Extend inside the scope of another type, using a message. -message LoginMessage { - extend extension_base.BaseMessage { - optional UserMessage user_message = 16; - } -} - -// Extend with a repeated field -extend extension_base.BaseMessage { - repeated Detail detail = 17; -} - -message Detail { - optional string color = 1; -} - -// An extension of an extension -message Announcement { - optional string words = 1; - extend LoudMessage { - optional Announcement loud_ext = 100; - } -} - -// Something that can be put in a message set. -message OldStyleParcel { - extend extension_base.OldStyleMessage { - optional OldStyleParcel message_set_extension = 2001; - } - - required string name = 1; - optional int32 height = 2; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go deleted file mode 100644 index 1bc0283f..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go +++ /dev/null @@ -1,444 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: grpc/grpc.proto - -package testing // import "github.com/golang/protobuf/protoc-gen-go/testdata/grpc" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type SimpleRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } -func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } -func (*SimpleRequest) ProtoMessage() {} -func (*SimpleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_65bf3902e49ee873, []int{0} -} -func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) -} -func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) -} -func (dst *SimpleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SimpleRequest.Merge(dst, src) -} -func (m *SimpleRequest) XXX_Size() int { - return xxx_messageInfo_SimpleRequest.Size(m) -} -func (m *SimpleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SimpleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo - -type SimpleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*SimpleResponse) Reset() { *m = SimpleResponse{} } -func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } -func (*SimpleResponse) ProtoMessage() {} -func (*SimpleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_65bf3902e49ee873, []int{1} -} -func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) -} -func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic) -} -func (dst *SimpleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SimpleResponse.Merge(dst, src) -} -func (m *SimpleResponse) XXX_Size() int { - return xxx_messageInfo_SimpleResponse.Size(m) -} -func (m *SimpleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SimpleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo - -type StreamMsg struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamMsg) Reset() { *m = StreamMsg{} } -func (m *StreamMsg) String() string { return proto.CompactTextString(m) } -func (*StreamMsg) ProtoMessage() {} -func (*StreamMsg) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_65bf3902e49ee873, []int{2} -} -func (m *StreamMsg) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamMsg.Unmarshal(m, b) -} -func (m *StreamMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamMsg.Marshal(b, m, deterministic) -} -func (dst *StreamMsg) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamMsg.Merge(dst, src) -} -func (m *StreamMsg) XXX_Size() int { - return xxx_messageInfo_StreamMsg.Size(m) -} -func (m *StreamMsg) XXX_DiscardUnknown() { - xxx_messageInfo_StreamMsg.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamMsg proto.InternalMessageInfo - -type StreamMsg2 struct { - XXX_NoUnkeyedLiteral 
struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamMsg2) Reset() { *m = StreamMsg2{} } -func (m *StreamMsg2) String() string { return proto.CompactTextString(m) } -func (*StreamMsg2) ProtoMessage() {} -func (*StreamMsg2) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_65bf3902e49ee873, []int{3} -} -func (m *StreamMsg2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamMsg2.Unmarshal(m, b) -} -func (m *StreamMsg2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamMsg2.Marshal(b, m, deterministic) -} -func (dst *StreamMsg2) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamMsg2.Merge(dst, src) -} -func (m *StreamMsg2) XXX_Size() int { - return xxx_messageInfo_StreamMsg2.Size(m) -} -func (m *StreamMsg2) XXX_DiscardUnknown() { - xxx_messageInfo_StreamMsg2.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamMsg2 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") - proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") - proto.RegisterType((*StreamMsg)(nil), "grpc.testing.StreamMsg") - proto.RegisterType((*StreamMsg2)(nil), "grpc.testing.StreamMsg2") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TestClient is the client API for Test service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TestClient interface { - UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // This RPC streams from the server only. 
- Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error) - // This RPC streams from the client. - Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error) - // This one streams in both directions. - Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error) -} - -type testClient struct { - cc *grpc.ClientConn -} - -func NewTestClient(cc *grpc.ClientConn) TestClient { - return &testClient{cc} -} - -func (c *testClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.Test/UnaryCall", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testClient) Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[0], "/grpc.testing.Test/Downstream", opts...) - if err != nil { - return nil, err - } - x := &testDownstreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Test_DownstreamClient interface { - Recv() (*StreamMsg, error) - grpc.ClientStream -} - -type testDownstreamClient struct { - grpc.ClientStream -} - -func (x *testDownstreamClient) Recv() (*StreamMsg, error) { - m := new(StreamMsg) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testClient) Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[1], "/grpc.testing.Test/Upstream", opts...) 
- if err != nil { - return nil, err - } - x := &testUpstreamClient{stream} - return x, nil -} - -type Test_UpstreamClient interface { - Send(*StreamMsg) error - CloseAndRecv() (*SimpleResponse, error) - grpc.ClientStream -} - -type testUpstreamClient struct { - grpc.ClientStream -} - -func (x *testUpstreamClient) Send(m *StreamMsg) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testUpstreamClient) CloseAndRecv() (*SimpleResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testClient) Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error) { - stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[2], "/grpc.testing.Test/Bidi", opts...) - if err != nil { - return nil, err - } - x := &testBidiClient{stream} - return x, nil -} - -type Test_BidiClient interface { - Send(*StreamMsg) error - Recv() (*StreamMsg2, error) - grpc.ClientStream -} - -type testBidiClient struct { - grpc.ClientStream -} - -func (x *testBidiClient) Send(m *StreamMsg) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testBidiClient) Recv() (*StreamMsg2, error) { - m := new(StreamMsg2) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TestServer is the server API for Test service. -type TestServer interface { - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // This RPC streams from the server only. - Downstream(*SimpleRequest, Test_DownstreamServer) error - // This RPC streams from the client. - Upstream(Test_UpstreamServer) error - // This one streams in both directions. 
- Bidi(Test_BidiServer) error -} - -func RegisterTestServer(s *grpc.Server, srv TestServer) { - s.RegisterService(&_Test_serviceDesc, srv) -} - -func _Test_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.Test/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Test_Downstream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SimpleRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServer).Downstream(m, &testDownstreamServer{stream}) -} - -type Test_DownstreamServer interface { - Send(*StreamMsg) error - grpc.ServerStream -} - -type testDownstreamServer struct { - grpc.ServerStream -} - -func (x *testDownstreamServer) Send(m *StreamMsg) error { - return x.ServerStream.SendMsg(m) -} - -func _Test_Upstream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServer).Upstream(&testUpstreamServer{stream}) -} - -type Test_UpstreamServer interface { - SendAndClose(*SimpleResponse) error - Recv() (*StreamMsg, error) - grpc.ServerStream -} - -type testUpstreamServer struct { - grpc.ServerStream -} - -func (x *testUpstreamServer) SendAndClose(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testUpstreamServer) Recv() (*StreamMsg, error) { - m := new(StreamMsg) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Test_Bidi_Handler(srv interface{}, stream grpc.ServerStream) error { - return 
srv.(TestServer).Bidi(&testBidiServer{stream}) -} - -type Test_BidiServer interface { - Send(*StreamMsg2) error - Recv() (*StreamMsg, error) - grpc.ServerStream -} - -type testBidiServer struct { - grpc.ServerStream -} - -func (x *testBidiServer) Send(m *StreamMsg2) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testBidiServer) Recv() (*StreamMsg, error) { - m := new(StreamMsg) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Test_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.Test", - HandlerType: (*TestServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UnaryCall", - Handler: _Test_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Downstream", - Handler: _Test_Downstream_Handler, - ServerStreams: true, - }, - { - StreamName: "Upstream", - Handler: _Test_Upstream_Handler, - ClientStreams: true, - }, - { - StreamName: "Bidi", - Handler: _Test_Bidi_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/grpc.proto", -} - -func init() { proto.RegisterFile("grpc/grpc.proto", fileDescriptor_grpc_65bf3902e49ee873) } - -var fileDescriptor_grpc_65bf3902e49ee873 = []byte{ - // 244 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4f, 0x2f, 0x2a, 0x48, - 0xd6, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x3c, 0x60, 0x76, 0x49, 0x6a, 0x71, - 0x49, 0x66, 0x5e, 0xba, 0x12, 0x3f, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, 0x4e, 0x6a, 0x50, 0x6a, - 0x61, 0x69, 0x6a, 0x71, 0x89, 0x92, 0x00, 0x17, 0x1f, 0x4c, 0xa0, 0xb8, 0x20, 0x3f, 0xaf, 0x38, - 0x55, 0x89, 0x9b, 0x8b, 0x33, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0xd7, 0xb7, 0x38, 0x5d, 0x89, 0x87, - 0x8b, 0x0b, 0xce, 0x31, 0x32, 0x9a, 0xc1, 0xc4, 0xc5, 0x12, 0x92, 0x5a, 0x5c, 0x22, 0xe4, 0xc6, - 0xc5, 0x19, 0x9a, 0x97, 0x58, 0x54, 0xe9, 0x9c, 0x98, 0x93, 0x23, 0x24, 0xad, 0x87, 0x6c, 0x85, - 0x1e, 0x8a, 0xf9, 0x52, 0x32, 0xd8, 
0x25, 0x21, 0x76, 0x09, 0xb9, 0x70, 0x71, 0xb9, 0xe4, 0x97, - 0xe7, 0x15, 0x83, 0xad, 0xc0, 0x6f, 0x90, 0x38, 0x9a, 0x24, 0xcc, 0x55, 0x06, 0x8c, 0x42, 0xce, - 0x5c, 0x1c, 0xa1, 0x05, 0x50, 0x33, 0x70, 0x29, 0xc3, 0xef, 0x10, 0x0d, 0x46, 0x21, 0x5b, 0x2e, - 0x16, 0xa7, 0xcc, 0x94, 0x4c, 0xdc, 0x06, 0x48, 0xe0, 0x90, 0x30, 0xd2, 0x60, 0x34, 0x60, 0x74, - 0x72, 0x88, 0xb2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, - 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, 0xc7, 0x40, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, 0x9b, 0x9e, - 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x0f, 0x32, 0x22, 0x25, 0xb1, 0x24, 0x11, 0x1c, 0x4d, 0xd6, 0x50, - 0x03, 0x93, 0xd8, 0xc0, 0x8a, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0xb9, 0x95, 0x42, - 0xc2, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto deleted file mode 100644 index 0e5c64a9..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto +++ /dev/null @@ -1,61 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package grpc.testing; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/grpc;testing"; - -message SimpleRequest { -} - -message SimpleResponse { -} - -message StreamMsg { -} - -message StreamMsg2 { -} - -service Test { - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // This RPC streams from the server only. - rpc Downstream(SimpleRequest) returns (stream StreamMsg); - - // This RPC streams from the client. - rpc Upstream(stream StreamMsg) returns (SimpleResponse); - - // This one streams in both directions. - rpc Bidi(stream StreamMsg) returns (stream StreamMsg2); -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go deleted file mode 100644 index d67ada6d..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: import_public/a.proto - -package import_public // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import sub "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// M from public import import_public/sub/a.proto -type M = sub.M - -// E from public import import_public/sub/a.proto -type E = sub.E - -var E_name = sub.E_name -var E_value = sub.E_value - -const E_ZERO = E(sub.E_ZERO) - -// Ignoring public import of Local from import_public/b.proto - -type Public struct { - M *sub.M `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"` - E sub.E `protobuf:"varint,2,opt,name=e,proto3,enum=goproto.test.import_public.sub.E" json:"e,omitempty"` - Local *Local `protobuf:"bytes,3,opt,name=local,proto3" json:"local,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Public) Reset() { *m = Public{} } -func (m *Public) String() string { return proto.CompactTextString(m) } -func (*Public) ProtoMessage() {} -func (*Public) Descriptor() ([]byte, []int) { - return fileDescriptor_a_c0314c022b7c17d8, []int{0} -} -func (m *Public) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Public.Unmarshal(m, b) -} -func (m *Public) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Public.Marshal(b, m, deterministic) -} -func (dst *Public) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_Public.Merge(dst, src) -} -func (m *Public) XXX_Size() int { - return xxx_messageInfo_Public.Size(m) -} -func (m *Public) XXX_DiscardUnknown() { - xxx_messageInfo_Public.DiscardUnknown(m) -} - -var xxx_messageInfo_Public proto.InternalMessageInfo - -func (m *Public) GetM() *sub.M { - if m != nil { - return m.M - } - return nil -} - -func (m *Public) GetE() sub.E { - if m != nil { - return m.E - } - return sub.E_ZERO -} - -func (m *Public) GetLocal() *Local { - if m != nil { - return m.Local - } - return nil -} - -func init() { - proto.RegisterType((*Public)(nil), "goproto.test.import_public.Public") -} - -func init() { proto.RegisterFile("import_public/a.proto", fileDescriptor_a_c0314c022b7c17d8) } - -var fileDescriptor_a_c0314c022b7c17d8 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x4f, 0xd4, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x92, 0x4a, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, 0xf4, 0x50, 0xd4, 0x48, - 0x49, 0xa2, 0x6a, 0x29, 0x2e, 0x4d, 0x82, 0x69, 0x93, 0x42, 0x33, 0x2d, 0x09, 0x22, 0xac, 0xb4, - 0x98, 0x91, 0x8b, 0x2d, 0x00, 0x2c, 0x24, 0xa4, 0xcf, 0xc5, 0x98, 0x2b, 0xc1, 0xa8, 0xc0, 0xa8, - 0xc1, 0x6d, 0xa4, 0xa8, 0x87, 0xdb, 0x12, 0xbd, 0xe2, 0xd2, 0x24, 0x3d, 0xdf, 0x20, 0xc6, 0x5c, - 0x90, 0x86, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x3e, 0xc2, 0x1a, 0x5c, 0x83, 0x18, 0x53, 0x85, - 0xcc, 0xb9, 0x58, 0x73, 0xf2, 0x93, 0x13, 0x73, 0x24, 0x98, 0x09, 0xdb, 0xe2, 0x03, 0x52, 0x18, - 0x04, 0x51, 0xef, 0xe4, 0x18, 0x65, 0x9f, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, - 0xab, 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x0f, 0xd6, 0x9a, 0x54, 0x9a, 0x06, 0x61, 0x24, - 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, 0xeb, 0x83, 0xcc, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x47, - 0x31, 0x2f, 0x80, 0x21, 0x80, 0x31, 0x89, 0x0d, 0xac, 0xd2, 0x18, 0x10, 0x00, 
0x00, 0xff, 0xff, - 0x70, 0xc5, 0xc3, 0x79, 0x5a, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto deleted file mode 100644 index 957ad897..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package goproto.test.import_public; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public"; - -import public "import_public/sub/a.proto"; // Different Go package. -import public "import_public/b.proto"; // Same Go package. - -message Public { - goproto.test.import_public.sub.M m = 1; - goproto.test.import_public.sub.E e = 2; - Local local = 3; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go deleted file mode 100644 index 24569abf..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: import_public/b.proto - -package import_public // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import sub "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Local struct { - M *sub.M `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"` - E sub.E `protobuf:"varint,2,opt,name=e,proto3,enum=goproto.test.import_public.sub.E" json:"e,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Local) Reset() { *m = Local{} } -func (m *Local) String() string { return proto.CompactTextString(m) } -func (*Local) ProtoMessage() {} -func (*Local) Descriptor() ([]byte, []int) { - return fileDescriptor_b_7f20a805fad67bd0, []int{0} -} -func (m *Local) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Local.Unmarshal(m, b) -} -func (m *Local) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Local.Marshal(b, m, deterministic) -} -func (dst *Local) XXX_Merge(src proto.Message) { - xxx_messageInfo_Local.Merge(dst, src) -} -func (m *Local) XXX_Size() int { - return xxx_messageInfo_Local.Size(m) -} -func (m *Local) XXX_DiscardUnknown() { - xxx_messageInfo_Local.DiscardUnknown(m) -} - -var xxx_messageInfo_Local proto.InternalMessageInfo - -func (m *Local) GetM() *sub.M { - if m != nil { - return m.M - } - return nil -} - -func (m *Local) GetE() sub.E { - if m != nil { - return m.E - } - return sub.E_ZERO -} - -func init() { - proto.RegisterType((*Local)(nil), "goproto.test.import_public.Local") -} - -func init() { proto.RegisterFile("import_public/b.proto", fileDescriptor_b_7f20a805fad67bd0) } - -var fileDescriptor_b_7f20a805fad67bd0 = []byte{ - // 174 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x4f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x92, 0x4a, 0xcf, 0x07, 0x33, 
0xf4, 0x4a, 0x52, 0x8b, 0x4b, 0xf4, 0x50, 0xd4, 0x48, - 0x49, 0xa2, 0x6a, 0x29, 0x2e, 0x4d, 0xd2, 0x4f, 0x84, 0x68, 0x53, 0xca, 0xe4, 0x62, 0xf5, 0xc9, - 0x4f, 0x4e, 0xcc, 0x11, 0xd2, 0xe7, 0x62, 0xcc, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, - 0xd4, 0xc3, 0x6d, 0x96, 0x5e, 0x71, 0x69, 0x92, 0x9e, 0x6f, 0x10, 0x63, 0x2e, 0x48, 0x43, 0xaa, - 0x04, 0x93, 0x02, 0xa3, 0x06, 0x1f, 0x61, 0x0d, 0xae, 0x41, 0x8c, 0xa9, 0x4e, 0x8e, 0x51, 0xf6, - 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, 0x79, - 0xe9, 0xfa, 0x60, 0x6d, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, - 0xbe, 0x3e, 0xc8, 0x9c, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x14, 0xb3, 0x92, 0xd8, 0xc0, 0xaa, 0x8c, - 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x2b, 0x5f, 0x8e, 0x04, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto deleted file mode 100644 index 1dbca3e4..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto +++ /dev/null @@ -1,43 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package goproto.test.import_public; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public"; - -import "import_public/sub/a.proto"; - -message Local { - goproto.test.import_public.sub.M m = 1; - goproto.test.import_public.sub.E e = 2; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go deleted file mode 100644 index be667c93..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: import_public/sub/a.proto - -package sub // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type E int32 - -const ( - E_ZERO E = 0 -) - -var E_name = map[int32]string{ - 0: "ZERO", -} -var E_value = map[string]int32{ - "ZERO": 0, -} - -func (x E) String() string { - return proto.EnumName(E_name, int32(x)) -} -func (E) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a_91ca0264a534463a, []int{0} -} - -type M struct { - // Field using a type in the same Go package, but a different source file. - M2 *M2 `protobuf:"bytes,1,opt,name=m2,proto3" json:"m2,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M) Reset() { *m = M{} } -func (m *M) String() string { return proto.CompactTextString(m) } -func (*M) ProtoMessage() {} -func (*M) Descriptor() ([]byte, []int) { - return fileDescriptor_a_91ca0264a534463a, []int{0} -} -func (m *M) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M.Unmarshal(m, b) -} -func (m *M) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M.Marshal(b, m, deterministic) -} -func (dst *M) XXX_Merge(src proto.Message) { - xxx_messageInfo_M.Merge(dst, src) -} -func (m *M) XXX_Size() int { - return xxx_messageInfo_M.Size(m) -} -func (m *M) XXX_DiscardUnknown() { - xxx_messageInfo_M.DiscardUnknown(m) -} - -var xxx_messageInfo_M proto.InternalMessageInfo - -func (m *M) GetM2() *M2 { - if m != nil { - return m.M2 - } - return nil -} - -func init() { - proto.RegisterType((*M)(nil), "goproto.test.import_public.sub.M") - proto.RegisterEnum("goproto.test.import_public.sub.E", E_name, E_value) -} - -func init() { 
proto.RegisterFile("import_public/sub/a.proto", fileDescriptor_a_91ca0264a534463a) } - -var fileDescriptor_a_91ca0264a534463a = []byte{ - // 172 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0x4f, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4b, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, - 0xf4, 0x50, 0xd4, 0xe9, 0x15, 0x97, 0x26, 0x49, 0x61, 0xd1, 0x9a, 0x04, 0xd1, 0xaa, 0x64, 0xce, - 0xc5, 0xe8, 0x2b, 0x64, 0xc4, 0xc5, 0x94, 0x6b, 0x24, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, - 0xa4, 0x87, 0xdf, 0x30, 0x3d, 0x5f, 0xa3, 0x20, 0xa6, 0x5c, 0x23, 0x2d, 0x5e, 0x2e, 0x46, 0x57, - 0x21, 0x0e, 0x2e, 0x96, 0x28, 0xd7, 0x20, 0x7f, 0x01, 0x06, 0x27, 0xd7, 0x28, 0xe7, 0xf4, 0xcc, - 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0x7d, - 0xb0, 0x39, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, 0xbe, 0x3e, - 0xc8, 0xe0, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x0c, 0x67, 0x25, 0xb1, 0x81, 0x55, 0x1a, 0x03, 0x02, - 0x00, 0x00, 0xff, 0xff, 0x81, 0xcc, 0x07, 0x7d, 0xed, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto deleted file mode 100644 index 4494c818..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto +++ /dev/null @@ -1,47 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package goproto.test.import_public.sub; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub"; - -import "import_public/sub/b.proto"; - -message M { - // Field using a type in the same Go package, but a different source file. 
- M2 m2 = 1; -} - -enum E { - ZERO = 0; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go deleted file mode 100644 index d57a3bb9..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: import_public/sub/b.proto - -package sub // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M2 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M2) Reset() { *m = M2{} } -func (m *M2) String() string { return proto.CompactTextString(m) } -func (*M2) ProtoMessage() {} -func (*M2) Descriptor() ([]byte, []int) { - return fileDescriptor_b_eba25180453d86b4, []int{0} -} -func (m *M2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M2.Unmarshal(m, b) -} -func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M2.Marshal(b, m, deterministic) -} -func (dst *M2) XXX_Merge(src proto.Message) { - xxx_messageInfo_M2.Merge(dst, src) -} -func (m *M2) XXX_Size() int { - return xxx_messageInfo_M2.Size(m) -} -func (m *M2) XXX_DiscardUnknown() { - xxx_messageInfo_M2.DiscardUnknown(m) -} - -var xxx_messageInfo_M2 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M2)(nil), "goproto.test.import_public.sub.M2") -} - -func init() { proto.RegisterFile("import_public/sub/b.proto", fileDescriptor_b_eba25180453d86b4) } - -var fileDescriptor_b_eba25180453d86b4 = []byte{ - // 127 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0x4f, 0xd2, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4b, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, - 0xf4, 0x50, 0xd4, 0xe9, 0x15, 0x97, 0x26, 0x29, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x39, 0xb9, 0x46, - 0x39, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, - 0xe6, 0xa5, 0xeb, 0x83, 0xf5, 0x25, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, - 0xe9, 0xf9, 0xfa, 0x20, 0x83, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0x31, 0x2c, 0x4d, 0x62, 0x03, 0xab, - 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x64, 0x42, 
0xe4, 0xa8, 0x90, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto deleted file mode 100644 index c7299e0f..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto +++ /dev/null @@ -1,39 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package goproto.test.import_public.sub; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub"; - -message M2 { -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go deleted file mode 100644 index 7ef776bf..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.9 - -package testdata - -import ( - "testing" - - mainpb "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" - subpb "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" -) - -func TestImportPublicLink(t *testing.T) { - // mainpb.[ME] should be interchangable with subpb.[ME]. - var _ mainpb.M = subpb.M{} - var _ mainpb.E = subpb.E(0) - _ = &mainpb.Public{ - M: &mainpb.M{}, - E: mainpb.E_ZERO, - Local: &mainpb.Local{ - M: &mainpb.M{}, - E: mainpb.E_ZERO, - }, - } - _ = &mainpb.Public{ - M: &subpb.M{}, - E: subpb.E_ZERO, - Local: &mainpb.Local{ - M: &subpb.M{}, - E: subpb.E_ZERO, - }, - } - _ = &mainpb.M{ - M2: &subpb.M2{}, - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go deleted file mode 100644 index ca312d6c..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: imports/fmt/m.proto - -package fmt // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M) Reset() { *m = M{} } -func (m *M) String() string { return proto.CompactTextString(m) } -func (*M) ProtoMessage() {} -func (*M) Descriptor() ([]byte, []int) { - return fileDescriptor_m_867dd34c461422b8, []int{0} -} -func (m *M) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M.Unmarshal(m, b) -} -func (m *M) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M.Marshal(b, m, deterministic) -} -func (dst *M) XXX_Merge(src proto.Message) { - xxx_messageInfo_M.Merge(dst, src) -} -func (m *M) XXX_Size() int { - return xxx_messageInfo_M.Size(m) -} -func (m *M) XXX_DiscardUnknown() { - xxx_messageInfo_M.DiscardUnknown(m) -} - -var xxx_messageInfo_M proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M)(nil), "fmt.M") -} - -func init() { proto.RegisterFile("imports/fmt/m.proto", fileDescriptor_m_867dd34c461422b8) } - -var fileDescriptor_m_867dd34c461422b8 = []byte{ - // 109 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x4f, 0xcb, 0x2d, 0xd1, 0xcf, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, - 
0x62, 0x4e, 0xcb, 0x2d, 0x51, 0x62, 0xe6, 0x62, 0xf4, 0x75, 0xb2, 0x8f, 0xb2, 0x4d, 0xcf, 0x2c, - 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, - 0x2b, 0x4a, 0x2a, 0x4d, 0x83, 0x30, 0x92, 0x75, 0xd3, 0x53, 0xf3, 0x74, 0xd3, 0xf3, 0xf5, 0x4b, - 0x52, 0x8b, 0x4b, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0x91, 0x8c, 0x4c, 0x62, 0x03, 0xab, 0x31, 0x06, - 0x04, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xc9, 0xee, 0xbe, 0x68, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto deleted file mode 100644 index 142d8cfa..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package fmt; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt"; -message M {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go deleted file mode 100644 index 963f7c72..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_a_1/m1.proto - -package test_a_1 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type E1 int32 - -const ( - E1_E1_ZERO E1 = 0 -) - -var E1_name = map[int32]string{ - 0: "E1_ZERO", -} -var E1_value = map[string]int32{ - "E1_ZERO": 0, -} - -func (x E1) String() string { - return proto.EnumName(E1_name, int32(x)) -} -func (E1) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_m1_56a2598431d21e61, []int{0} -} - -type M1 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M1) Reset() { *m = M1{} } -func (m *M1) String() string { return proto.CompactTextString(m) } -func (*M1) ProtoMessage() {} -func (*M1) Descriptor() ([]byte, []int) { - return fileDescriptor_m1_56a2598431d21e61, []int{0} -} -func (m *M1) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M1.Unmarshal(m, b) -} -func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M1.Marshal(b, m, deterministic) -} -func (dst *M1) XXX_Merge(src proto.Message) { - xxx_messageInfo_M1.Merge(dst, src) -} -func (m *M1) XXX_Size() int { - return xxx_messageInfo_M1.Size(m) -} -func (m *M1) XXX_DiscardUnknown() { - xxx_messageInfo_M1.DiscardUnknown(m) -} - -var xxx_messageInfo_M1 proto.InternalMessageInfo - -type M1_1 struct { - M1 *M1 `protobuf:"bytes,1,opt,name=m1,proto3" json:"m1,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M1_1) Reset() { *m = M1_1{} } -func (m *M1_1) String() string { return proto.CompactTextString(m) } -func (*M1_1) ProtoMessage() {} -func (*M1_1) Descriptor() ([]byte, []int) { - return fileDescriptor_m1_56a2598431d21e61, []int{1} -} -func (m *M1_1) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M1_1.Unmarshal(m, b) -} -func (m *M1_1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M1_1.Marshal(b, m, deterministic) -} -func 
(dst *M1_1) XXX_Merge(src proto.Message) { - xxx_messageInfo_M1_1.Merge(dst, src) -} -func (m *M1_1) XXX_Size() int { - return xxx_messageInfo_M1_1.Size(m) -} -func (m *M1_1) XXX_DiscardUnknown() { - xxx_messageInfo_M1_1.DiscardUnknown(m) -} - -var xxx_messageInfo_M1_1 proto.InternalMessageInfo - -func (m *M1_1) GetM1() *M1 { - if m != nil { - return m.M1 - } - return nil -} - -func init() { - proto.RegisterType((*M1)(nil), "test.a.M1") - proto.RegisterType((*M1_1)(nil), "test.a.M1_1") - proto.RegisterEnum("test.a.E1", E1_name, E1_value) -} - -func init() { proto.RegisterFile("imports/test_a_1/m1.proto", fileDescriptor_m1_56a2598431d21e61) } - -var fileDescriptor_m1_56a2598431d21e61 = []byte{ - // 165 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, - 0x1a, 0x2a, 0x29, 0x71, 0xb1, 0xf8, 0x1a, 0xc6, 0x1b, 0x0a, 0x49, 0x71, 0x31, 0xe5, 0x1a, 0x4a, - 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x71, 0xe9, 0x41, 0x94, 0xe8, 0xf9, 0x1a, 0x06, 0x31, 0xe5, - 0x1a, 0x6a, 0x09, 0x72, 0x31, 0xb9, 0x1a, 0x0a, 0x71, 0x73, 0xb1, 0xbb, 0x1a, 0xc6, 0x47, 0xb9, - 0x06, 0xf9, 0x0b, 0x30, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, - 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0xcd, 0x4f, 0x2a, 0x4d, 0x83, - 0x30, 0x92, 0x75, 0xd3, 0x53, 0xf3, 0x74, 0xd3, 0xf3, 0xc1, 0x4e, 0x48, 0x49, 0x2c, 0x49, 0xd4, - 0x47, 0x77, 0x53, 0x12, 0x1b, 0x58, 0xa1, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xae, 0xc9, - 0xcd, 0xae, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto deleted file mode 100644 index da54c1ee..00000000 --- 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto +++ /dev/null @@ -1,44 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; -package test.a; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1"; - -message M1 {} - -message M1_1 { - M1 m1 = 1; -} - -enum E1 { - E1_ZERO = 0; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go deleted file mode 100644 index 1b629bf3..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_a_1/m2.proto - -package test_a_1 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M2 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M2) Reset() { *m = M2{} } -func (m *M2) String() string { return proto.CompactTextString(m) } -func (*M2) ProtoMessage() {} -func (*M2) Descriptor() ([]byte, []int) { - return fileDescriptor_m2_ccd6356c045a9ac3, []int{0} -} -func (m *M2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M2.Unmarshal(m, b) -} -func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M2.Marshal(b, m, deterministic) -} -func (dst *M2) XXX_Merge(src proto.Message) { - xxx_messageInfo_M2.Merge(dst, src) -} -func (m *M2) XXX_Size() int { - return xxx_messageInfo_M2.Size(m) -} -func (m *M2) XXX_DiscardUnknown() { - xxx_messageInfo_M2.DiscardUnknown(m) -} - -var xxx_messageInfo_M2 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M2)(nil), "test.a.M2") -} - -func init() { proto.RegisterFile("imports/test_a_1/m2.proto", fileDescriptor_m2_ccd6356c045a9ac3) } - -var fileDescriptor_m2_ccd6356c045a9ac3 = []byte{ - // 114 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd2, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, - 0x1a, 0x39, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, - 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, - 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, - 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xe0, 0x7e, 0xc0, 0x77, 0x00, - 0x00, 0x00, -} diff --git 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto deleted file mode 100644 index 49499dc9..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package test.a; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1"; -message M2 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go deleted file mode 100644 index e3895d2b..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_a_2/m3.proto - -package test_a_2 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M3 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M3) Reset() { *m = M3{} } -func (m *M3) String() string { return proto.CompactTextString(m) } -func (*M3) ProtoMessage() {} -func (*M3) Descriptor() ([]byte, []int) { - return fileDescriptor_m3_de310e87d08d4216, []int{0} -} -func (m *M3) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M3.Unmarshal(m, b) -} -func (m *M3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M3.Marshal(b, m, deterministic) -} -func (dst *M3) XXX_Merge(src proto.Message) { - xxx_messageInfo_M3.Merge(dst, src) -} -func (m *M3) XXX_Size() int { - return xxx_messageInfo_M3.Size(m) -} -func (m *M3) XXX_DiscardUnknown() { - xxx_messageInfo_M3.DiscardUnknown(m) -} - -var xxx_messageInfo_M3 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M3)(nil), "test.a.M3") -} - -func init() { proto.RegisterFile("imports/test_a_2/m3.proto", fileDescriptor_m3_de310e87d08d4216) } - -var fileDescriptor_m3_de310e87d08d4216 = []byte{ - // 114 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd2, 0xcf, 0x35, 0xd6, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, - 0x1a, 0x3b, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, - 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, - 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, - 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x23, 0x86, 0x27, 0x47, 0x77, 0x00, - 0x00, 0x00, -} diff --git 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto deleted file mode 100644 index 5e811ef8..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package test.a; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2"; -message M3 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go deleted file mode 100644 index 65a3bad2..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_a_2/m4.proto - -package test_a_2 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M4 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M4) Reset() { *m = M4{} } -func (m *M4) String() string { return proto.CompactTextString(m) } -func (*M4) ProtoMessage() {} -func (*M4) Descriptor() ([]byte, []int) { - return fileDescriptor_m4_da12b386229f3791, []int{0} -} -func (m *M4) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M4.Unmarshal(m, b) -} -func (m *M4) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M4.Marshal(b, m, deterministic) -} -func (dst *M4) XXX_Merge(src proto.Message) { - xxx_messageInfo_M4.Merge(dst, src) -} -func (m *M4) XXX_Size() int { - return xxx_messageInfo_M4.Size(m) -} -func (m *M4) XXX_DiscardUnknown() { - xxx_messageInfo_M4.DiscardUnknown(m) -} - -var xxx_messageInfo_M4 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M4)(nil), "test.a.M4") -} - -func init() { proto.RegisterFile("imports/test_a_2/m4.proto", fileDescriptor_m4_da12b386229f3791) } - -var fileDescriptor_m4_da12b386229f3791 = []byte{ - // 114 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd2, 0xcf, 0x35, 0xd1, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, - 0x9a, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, - 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, - 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, - 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x58, 0xcb, 0x10, 0xc8, 0x77, 0x00, - 0x00, 0x00, -} diff --git 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto deleted file mode 100644 index 8f8fe3e1..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package test.a; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2"; -message M4 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go deleted file mode 100644 index 831f4149..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_b_1/m1.proto - -package beta // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M1 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M1) Reset() { *m = M1{} } -func (m *M1) String() string { return proto.CompactTextString(m) } -func (*M1) ProtoMessage() {} -func (*M1) Descriptor() ([]byte, []int) { - return fileDescriptor_m1_aff127b054aec649, []int{0} -} -func (m *M1) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M1.Unmarshal(m, b) -} -func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M1.Marshal(b, m, deterministic) -} -func (dst *M1) XXX_Merge(src proto.Message) { - xxx_messageInfo_M1.Merge(dst, src) -} -func (m *M1) XXX_Size() int { - return xxx_messageInfo_M1.Size(m) -} -func (m *M1) XXX_DiscardUnknown() { - xxx_messageInfo_M1.DiscardUnknown(m) -} - -var xxx_messageInfo_M1 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M1)(nil), "test.b.part1.M1") -} - -func init() { proto.RegisterFile("imports/test_b_1/m1.proto", fileDescriptor_m1_aff127b054aec649) } - -var fileDescriptor_m1_aff127b054aec649 = []byte{ - // 125 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8a, 0x37, 0xd4, 0xcf, 0x35, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x09, 0xe9, 0x25, 0xe9, 0x15, 0x24, 0x16, 0x95, - 0x18, 0x2a, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x3a, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, - 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x95, 0x27, - 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0x13, 0x53, 0x12, - 0x4b, 0x12, 0xf5, 0xd1, 0xad, 0xb0, 0x4e, 0x4a, 0x2d, 0x49, 0x4c, 0x62, 0x03, 0xab, 0x36, 0x06, - 0x04, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf1, 0x3b, 0x7f, 0x82, 0x00, 0x00, 
0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto deleted file mode 100644 index 2c35ec4a..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package test.b.part1; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1;beta"; -message M1 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go deleted file mode 100644 index bc741056..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_b_1/m2.proto - -package beta // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type M2 struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *M2) Reset() { *m = M2{} } -func (m *M2) String() string { return proto.CompactTextString(m) } -func (*M2) ProtoMessage() {} -func (*M2) Descriptor() ([]byte, []int) { - return fileDescriptor_m2_0c59cab35ba1b0d8, []int{0} -} -func (m *M2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_M2.Unmarshal(m, b) -} -func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_M2.Marshal(b, m, deterministic) -} -func (dst *M2) XXX_Merge(src proto.Message) { - xxx_messageInfo_M2.Merge(dst, src) -} -func (m *M2) XXX_Size() int { - return xxx_messageInfo_M2.Size(m) -} -func (m *M2) XXX_DiscardUnknown() { - xxx_messageInfo_M2.DiscardUnknown(m) -} - -var xxx_messageInfo_M2 proto.InternalMessageInfo - -func init() { - proto.RegisterType((*M2)(nil), "test.b.part2.M2") -} - -func init() { proto.RegisterFile("imports/test_b_1/m2.proto", fileDescriptor_m2_0c59cab35ba1b0d8) } - -var fileDescriptor_m2_0c59cab35ba1b0d8 = []byte{ - // 125 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8a, 0x37, 0xd4, 0xcf, 0x35, 0xd2, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x09, 0xe9, 0x25, 0xe9, 0x15, 0x24, 0x16, 0x95, - 0x18, 0x29, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x39, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, - 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x95, 0x27, - 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0x13, 0x53, 0x12, - 0x4b, 0x12, 0xf5, 0xd1, 0xad, 0xb0, 0x4e, 0x4a, 0x2d, 0x49, 0x4c, 0x62, 0x03, 0xab, 0x36, 0x06, - 0x04, 0x00, 0x00, 0xff, 0xff, 0x44, 0x29, 0xbe, 0x6d, 0x82, 0x00, 0x00, 
0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto deleted file mode 100644 index 13723be4..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; -package test.b.part2; -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1;beta"; -message M2 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go deleted file mode 100644 index 4f79694c..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_import_a1m1.proto - -package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type A1M1 struct { - F *test_a_1.M1 `protobuf:"bytes,1,opt,name=f,proto3" json:"f,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *A1M1) Reset() { *m = A1M1{} } -func (m *A1M1) String() string { return proto.CompactTextString(m) } -func (*A1M1) ProtoMessage() {} -func (*A1M1) Descriptor() ([]byte, []int) { - return fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e, []int{0} -} -func (m *A1M1) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_A1M1.Unmarshal(m, b) -} -func (m *A1M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_A1M1.Marshal(b, m, deterministic) -} -func (dst *A1M1) XXX_Merge(src proto.Message) { - xxx_messageInfo_A1M1.Merge(dst, src) -} -func (m *A1M1) XXX_Size() int { - return xxx_messageInfo_A1M1.Size(m) -} -func (m *A1M1) XXX_DiscardUnknown() { - xxx_messageInfo_A1M1.DiscardUnknown(m) -} - -var xxx_messageInfo_A1M1 proto.InternalMessageInfo - -func (m *A1M1) GetF() *test_a_1.M1 { - if m != nil { - return m.F - } - return nil -} - -func init() { - proto.RegisterType((*A1M1)(nil), "test.A1M1") -} - -func init() { - proto.RegisterFile("imports/test_import_a1m1.proto", fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e) -} - -var fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e = []byte{ - // 149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcb, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x87, 0x70, 0xe2, 0x13, 0x0d, 0x73, 0x0d, - 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x58, 0x40, 0xe2, 0x52, 0x92, 0x28, 0xaa, 0x12, 0xe3, - 0x0d, 0xf5, 0x61, 0x0a, 0x94, 0x14, 0xb8, 0x58, 0x1c, 0x0d, 0x7d, 0x0d, 0x85, 0x24, 0xb8, 0x18, - 0xd3, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xb8, 0xf4, 0x40, 0xca, 0xf4, 0x12, 0xf5, 0x7c, - 0x0d, 0x83, 0x18, 0xd3, 
0x9c, 0xac, 0xa3, 0x2c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, - 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0x20, - 0x8c, 0x64, 0xdd, 0xf4, 0xd4, 0x3c, 0xdd, 0xf4, 0x7c, 0xb0, 0xf9, 0x29, 0x89, 0x25, 0x89, 0xfa, - 0x50, 0x0b, 0x93, 0xd8, 0xc0, 0xf2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x2f, 0x18, - 0x23, 0xa8, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto deleted file mode 100644 index abf07f2a..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package test; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; - -import "imports/test_a_1/m1.proto"; - -message A1M1 { - test.a.M1 f = 1; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go deleted file mode 100644 index f5aa2e82..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_import_a1m2.proto - -package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type A1M2 struct { - F *test_a_1.M2 `protobuf:"bytes,1,opt,name=f,proto3" json:"f,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *A1M2) Reset() { *m = A1M2{} } -func (m *A1M2) String() string { return proto.CompactTextString(m) } -func (*A1M2) ProtoMessage() {} -func (*A1M2) Descriptor() ([]byte, []int) { - return fileDescriptor_test_import_a1m2_9a3281ce9464e116, []int{0} -} -func (m *A1M2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_A1M2.Unmarshal(m, b) -} -func (m *A1M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_A1M2.Marshal(b, m, deterministic) -} -func (dst *A1M2) XXX_Merge(src proto.Message) { - xxx_messageInfo_A1M2.Merge(dst, src) -} -func (m *A1M2) XXX_Size() int { - return xxx_messageInfo_A1M2.Size(m) -} -func (m *A1M2) XXX_DiscardUnknown() { - xxx_messageInfo_A1M2.DiscardUnknown(m) -} - -var xxx_messageInfo_A1M2 proto.InternalMessageInfo - -func (m *A1M2) GetF() *test_a_1.M2 { - if m != nil { - return m.F - } - return nil -} - -func init() { - proto.RegisterType((*A1M2)(nil), "test.A1M2") -} - -func init() { - proto.RegisterFile("imports/test_import_a1m2.proto", fileDescriptor_test_import_a1m2_9a3281ce9464e116) -} - -var fileDescriptor_test_import_a1m2_9a3281ce9464e116 = []byte{ - // 149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcb, 0xcc, 0x2d, 0xc8, - 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x87, 0x70, 0xe2, 0x13, 0x0d, 0x73, 0x8d, - 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x58, 0x40, 0xe2, 0x52, 0x92, 0x28, 0xaa, 0x12, 0xe3, - 0x0d, 0xf5, 0x61, 0x0a, 0x94, 0x14, 0xb8, 0x58, 0x1c, 0x0d, 0x7d, 0x8d, 0x84, 0x24, 0xb8, 0x18, - 0xd3, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xb8, 0xf4, 0x40, 0xca, 0xf4, 0x12, 0xf5, 0x7c, - 0x8d, 0x82, 0x18, 0xd3, 
0x9c, 0xac, 0xa3, 0x2c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, - 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0x20, - 0x8c, 0x64, 0xdd, 0xf4, 0xd4, 0x3c, 0xdd, 0xf4, 0x7c, 0xb0, 0xf9, 0x29, 0x89, 0x25, 0x89, 0xfa, - 0x50, 0x0b, 0x93, 0xd8, 0xc0, 0xf2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x88, 0xfb, - 0xea, 0xa8, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto deleted file mode 100644 index 5c53950d..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package test; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; - -import "imports/test_a_1/m2.proto"; - -message A1M2 { - test.a.M2 f = 1; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go deleted file mode 100644 index 4f9fd046..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go +++ /dev/null @@ -1,138 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: imports/test_import_all.proto - -package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import fmt1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt" -import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" -import test_a_2 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" -import test_b_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type All struct { - Am1 *test_a_1.M1 `protobuf:"bytes,1,opt,name=am1,proto3" json:"am1,omitempty"` - Am2 *test_a_1.M2 `protobuf:"bytes,2,opt,name=am2,proto3" json:"am2,omitempty"` - Am3 *test_a_2.M3 `protobuf:"bytes,3,opt,name=am3,proto3" json:"am3,omitempty"` - Am4 *test_a_2.M4 `protobuf:"bytes,4,opt,name=am4,proto3" json:"am4,omitempty"` - Bm1 *test_b_1.M1 `protobuf:"bytes,5,opt,name=bm1,proto3" json:"bm1,omitempty"` - Bm2 *test_b_1.M2 `protobuf:"bytes,6,opt,name=bm2,proto3" json:"bm2,omitempty"` - Fmt *fmt1.M `protobuf:"bytes,7,opt,name=fmt,proto3" json:"fmt,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *All) Reset() { *m = All{} } -func (m *All) String() string { return proto.CompactTextString(m) } -func (*All) ProtoMessage() {} -func (*All) Descriptor() ([]byte, []int) { - return fileDescriptor_test_import_all_b41dc4592e4a4f3b, []int{0} -} -func (m *All) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_All.Unmarshal(m, b) -} -func (m *All) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_All.Marshal(b, m, deterministic) -} -func (dst *All) XXX_Merge(src proto.Message) { - xxx_messageInfo_All.Merge(dst, src) -} -func (m *All) XXX_Size() int { - return xxx_messageInfo_All.Size(m) -} -func (m *All) XXX_DiscardUnknown() { - xxx_messageInfo_All.DiscardUnknown(m) -} - -var xxx_messageInfo_All proto.InternalMessageInfo - -func (m *All) GetAm1() *test_a_1.M1 { - if m != nil { - return m.Am1 - } - return nil -} - -func (m *All) GetAm2() *test_a_1.M2 { - if m != nil { - return m.Am2 - } - return nil -} - -func (m *All) GetAm3() *test_a_2.M3 { - if m != nil { - return m.Am3 - } - return nil -} - -func (m *All) GetAm4() *test_a_2.M4 { - if m != nil { - 
return m.Am4 - } - return nil -} - -func (m *All) GetBm1() *test_b_1.M1 { - if m != nil { - return m.Bm1 - } - return nil -} - -func (m *All) GetBm2() *test_b_1.M2 { - if m != nil { - return m.Bm2 - } - return nil -} - -func (m *All) GetFmt() *fmt1.M { - if m != nil { - return m.Fmt - } - return nil -} - -func init() { - proto.RegisterType((*All)(nil), "test.All") -} - -func init() { - proto.RegisterFile("imports/test_import_all.proto", fileDescriptor_test_import_all_b41dc4592e4a4f3b) -} - -var fileDescriptor_test_import_all_b41dc4592e4a4f3b = []byte{ - // 258 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd0, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0x06, 0x60, 0x15, 0x97, 0x20, 0x99, 0x05, 0x85, 0xc5, 0x20, 0x90, 0x50, 0x27, 0x96, 0xda, - 0xb2, 0x9d, 0x05, 0x31, 0xc1, 0xde, 0xa5, 0x23, 0x4b, 0x64, 0x97, 0xc6, 0x54, 0xf2, 0xd5, 0x51, - 0x7a, 0x7d, 0x5e, 0x5e, 0x05, 0xd9, 0x07, 0x12, 0x84, 0x66, 0x4b, 0xfe, 0xef, 0xb7, 0xce, 0x3e, - 0x7e, 0xbf, 0x83, 0x3e, 0x0d, 0x78, 0x50, 0xb8, 0x3d, 0x60, 0x4b, 0x3f, 0xad, 0x8b, 0x51, 0xf6, - 0x43, 0xc2, 0x54, 0xcf, 0x73, 0x7c, 0x7b, 0xf3, 0xa7, 0xe4, 0x5a, 0xad, 0x40, 0x53, 0xe1, 0x14, - 0x99, 0x09, 0x32, 0x0a, 0xec, 0x34, 0x35, 0x27, 0xc9, 0x4f, 0xcf, 0xf2, 0xbf, 0x67, 0x5d, 0xff, - 0x50, 0x07, 0xa8, 0x80, 0xc2, 0xc5, 0xe7, 0x8c, 0xb3, 0x97, 0x18, 0xeb, 0x3b, 0xce, 0x1c, 0x68, - 0x31, 0x7b, 0x98, 0x3d, 0x5e, 0x1a, 0x2e, 0xf3, 0x69, 0xe9, 0xe4, 0x4a, 0xaf, 0x73, 0x4c, 0x6a, - 0xc4, 0xd9, 0x48, 0x4d, 0x56, 0x43, 0x6a, 0x05, 0x1b, 0xa9, 0xcd, 0x6a, 0x49, 0x1b, 0x31, 0x1f, - 0x69, 0x93, 0xb5, 0xa9, 0x17, 0x9c, 0x79, 0xd0, 0xe2, 0xbc, 0xe8, 0x15, 0xa9, 0x97, 0xbd, 0x1b, - 0x50, 0x97, 0xe9, 0x1e, 0x34, 0x75, 0x8c, 0xa8, 0xfe, 0x77, 0x4c, 0xb9, 0x83, 0x07, 0x53, 0x0b, - 0xce, 0x3a, 0x40, 0x71, 0x51, 0x3a, 0x95, 0xec, 0x00, 0xe5, 0x6a, 0x9d, 0xa3, 0xd7, 0xe7, 0xb7, - 0xa7, 0xb0, 0xc3, 0x8f, 0xa3, 0x97, 0x9b, 0x04, 0x2a, 0xa4, 0xe8, 0xf6, 0x41, 0x95, 0xc7, 0xfb, - 0x63, 0x47, 0x1f, 
0x9b, 0x65, 0xd8, 0xee, 0x97, 0x21, 0x95, 0xa5, 0xbd, 0x3b, 0x74, 0xea, 0x7b, - 0x55, 0xbe, 0x2a, 0x6e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x95, 0x39, 0xa3, 0x82, 0x03, 0x02, - 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto deleted file mode 100644 index 582d722e..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto +++ /dev/null @@ -1,58 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package test; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; - -// test_a_1/m*.proto are in the same Go package and proto package. -// test_a_*/*.proto are in different Go packages, but the same proto package. -// test_b_1/*.proto are in the same Go package, but different proto packages. -// fmt/m.proto has a package name which conflicts with "fmt". -import "imports/test_a_1/m1.proto"; -import "imports/test_a_1/m2.proto"; -import "imports/test_a_2/m3.proto"; -import "imports/test_a_2/m4.proto"; -import "imports/test_b_1/m1.proto"; -import "imports/test_b_1/m2.proto"; -import "imports/fmt/m.proto"; - -message All { - test.a.M1 am1 = 1; - test.a.M2 am2 = 2; - test.a.M3 am3 = 3; - test.a.M4 am4 = 4; - test.b.part1.M1 bm1 = 5; - test.b.part2.M2 bm2 = 6; - fmt.M fmt = 7; -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go deleted file mode 100644 index 7ec1f2db..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A simple binary to link together the protocol buffers in this test. 
- -package testdata - -import ( - "testing" - - importspb "github.com/golang/protobuf/protoc-gen-go/testdata/imports" - multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi" - mytestpb "github.com/golang/protobuf/protoc-gen-go/testdata/my_test" -) - -func TestLink(t *testing.T) { - _ = &multipb.Multi1{} - _ = &mytestpb.Request{} - _ = &importspb.All{} -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go deleted file mode 100644 index da0fdf8f..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: multi/multi1.proto - -package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Multi1 struct { - Multi2 *Multi2 `protobuf:"bytes,1,req,name=multi2" json:"multi2,omitempty"` - Color *Multi2_Color `protobuf:"varint,2,opt,name=color,enum=multitest.Multi2_Color" json:"color,omitempty"` - HatType *Multi3_HatType `protobuf:"varint,3,opt,name=hat_type,json=hatType,enum=multitest.Multi3_HatType" json:"hat_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Multi1) Reset() { *m = Multi1{} } -func (m *Multi1) String() string { return proto.CompactTextString(m) } -func (*Multi1) ProtoMessage() {} -func (*Multi1) Descriptor() ([]byte, []int) { - return fileDescriptor_multi1_08e50c6822e808b8, []int{0} -} -func (m *Multi1) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Multi1.Unmarshal(m, b) -} -func (m *Multi1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Multi1.Marshal(b, m, deterministic) -} -func (dst *Multi1) XXX_Merge(src proto.Message) { - xxx_messageInfo_Multi1.Merge(dst, src) -} -func (m *Multi1) XXX_Size() int { - return xxx_messageInfo_Multi1.Size(m) -} -func (m *Multi1) XXX_DiscardUnknown() { - xxx_messageInfo_Multi1.DiscardUnknown(m) -} - -var xxx_messageInfo_Multi1 proto.InternalMessageInfo - -func (m *Multi1) GetMulti2() *Multi2 { - if m != nil { - return m.Multi2 - } - return nil -} - -func (m *Multi1) GetColor() Multi2_Color { - if m != nil && m.Color != nil { - return *m.Color - } - return Multi2_BLUE -} - -func (m *Multi1) GetHatType() Multi3_HatType { - if m != nil && m.HatType != nil { - return *m.HatType - } - return Multi3_FEDORA -} - -func init() { - proto.RegisterType((*Multi1)(nil), "multitest.Multi1") -} - -func init() { proto.RegisterFile("multi/multi1.proto", fileDescriptor_multi1_08e50c6822e808b8) } - -var fileDescriptor_multi1_08e50c6822e808b8 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, - 0xc9, 0xd4, 0x07, 0x93, 0x86, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, - 0x6a, 0x71, 0x89, 0x14, 0xb2, 0xb4, 0x11, 0x44, 0x1a, 0x45, 0xcc, 0x18, 0x22, 0xa6, 0x34, 0x83, - 0x91, 0x8b, 0xcd, 0x17, 0x6c, 0x86, 0x90, 0x26, 0x17, 0x1b, 0x44, 0xb9, 0x04, 0xa3, 0x02, 0x93, - 0x06, 0xb7, 0x91, 0xa0, 0x1e, 0xdc, 0x38, 0x3d, 0xb0, 0x12, 0xa3, 0x20, 0xa8, 0x02, 0x21, 0x5d, - 0x2e, 0xd6, 0xe4, 0xfc, 0x9c, 0xfc, 0x22, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x3e, 0x23, 0x71, 0x0c, - 0x95, 0x7a, 0xce, 0x20, 0xe9, 0x20, 0x88, 0x2a, 0x21, 0x13, 0x2e, 0x8e, 0x8c, 0xc4, 0x92, 0xf8, - 0x92, 0xca, 0x82, 0x54, 0x09, 0x66, 0xb0, 0x0e, 0x49, 0x74, 0x1d, 0xc6, 0x7a, 0x1e, 0x89, 0x25, - 0x21, 0x95, 0x05, 0xa9, 0x41, 0xec, 0x19, 0x10, 0x86, 0x93, 0x73, 0x94, 0x63, 0x7a, 0x66, 0x49, - 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0xd8, - 0xd5, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, 0xbe, 0x3e, 0xc8, - 0xa0, 0x94, 0xc4, 0x92, 0x44, 0x88, 0xe7, 0xac, 0xe1, 0x86, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x60, 0x7d, 0xfc, 0x9f, 0x27, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto deleted file mode 100644 index d3a32041..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto2"; - -import "multi/multi2.proto"; -import "multi/multi3.proto"; - -package multitest; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; - -message Multi1 { - required Multi2 multi2 = 1; - optional Multi2.Color color = 2; - optional Multi3.HatType hat_type = 3; -} - diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go deleted file mode 100644 index b66ce793..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go +++ /dev/null @@ -1,128 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: multi/multi2.proto - -package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Multi2_Color int32 - -const ( - Multi2_BLUE Multi2_Color = 1 - Multi2_GREEN Multi2_Color = 2 - Multi2_RED Multi2_Color = 3 -) - -var Multi2_Color_name = map[int32]string{ - 1: "BLUE", - 2: "GREEN", - 3: "RED", -} -var Multi2_Color_value = map[string]int32{ - "BLUE": 1, - "GREEN": 2, - "RED": 3, -} - -func (x Multi2_Color) Enum() *Multi2_Color { - p := new(Multi2_Color) - *p = x - return p -} -func (x Multi2_Color) String() string { - return proto.EnumName(Multi2_Color_name, int32(x)) -} -func (x *Multi2_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Multi2_Color_value, data, "Multi2_Color") - if err != nil { - return err - } - *x = Multi2_Color(value) - return nil -} -func (Multi2_Color) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_multi2_c47490ad66d93e67, []int{0, 0} -} - -type Multi2 struct { - RequiredValue *int32 `protobuf:"varint,1,req,name=required_value,json=requiredValue" json:"required_value,omitempty"` - Color *Multi2_Color `protobuf:"varint,2,opt,name=color,enum=multitest.Multi2_Color" json:"color,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Multi2) Reset() { *m = Multi2{} } -func (m *Multi2) String() string { return proto.CompactTextString(m) } -func (*Multi2) ProtoMessage() {} -func (*Multi2) Descriptor() ([]byte, []int) { - return fileDescriptor_multi2_c47490ad66d93e67, []int{0} -} -func (m *Multi2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Multi2.Unmarshal(m, b) -} -func (m *Multi2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Multi2.Marshal(b, m, deterministic) -} -func (dst *Multi2) XXX_Merge(src proto.Message) { - xxx_messageInfo_Multi2.Merge(dst, src) -} -func (m *Multi2) XXX_Size() int { - return xxx_messageInfo_Multi2.Size(m) -} -func (m *Multi2) XXX_DiscardUnknown() 
{ - xxx_messageInfo_Multi2.DiscardUnknown(m) -} - -var xxx_messageInfo_Multi2 proto.InternalMessageInfo - -func (m *Multi2) GetRequiredValue() int32 { - if m != nil && m.RequiredValue != nil { - return *m.RequiredValue - } - return 0 -} - -func (m *Multi2) GetColor() Multi2_Color { - if m != nil && m.Color != nil { - return *m.Color - } - return Multi2_BLUE -} - -func init() { - proto.RegisterType((*Multi2)(nil), "multitest.Multi2") - proto.RegisterEnum("multitest.Multi2_Color", Multi2_Color_name, Multi2_Color_value) -} - -func init() { proto.RegisterFile("multi/multi2.proto", fileDescriptor_multi2_c47490ad66d93e67) } - -var fileDescriptor_multi2_c47490ad66d93e67 = []byte{ - // 202 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, - 0xc9, 0xd4, 0x07, 0x93, 0x46, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, - 0x6a, 0x71, 0x89, 0x52, 0x2b, 0x23, 0x17, 0x9b, 0x2f, 0x58, 0x4e, 0x48, 0x95, 0x8b, 0xaf, 0x28, - 0xb5, 0xb0, 0x34, 0xb3, 0x28, 0x35, 0x25, 0xbe, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x51, 0x81, - 0x49, 0x83, 0x35, 0x88, 0x17, 0x26, 0x1a, 0x06, 0x12, 0x14, 0xd2, 0xe5, 0x62, 0x4d, 0xce, 0xcf, - 0xc9, 0x2f, 0x92, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x33, 0x12, 0xd7, 0x83, 0x1b, 0xa6, 0x07, 0x31, - 0x48, 0xcf, 0x19, 0x24, 0x1d, 0x04, 0x51, 0xa5, 0xa4, 0xca, 0xc5, 0x0a, 0xe6, 0x0b, 0x71, 0x70, - 0xb1, 0x38, 0xf9, 0x84, 0xba, 0x0a, 0x30, 0x0a, 0x71, 0x72, 0xb1, 0xba, 0x07, 0xb9, 0xba, 0xfa, - 0x09, 0x30, 0x09, 0xb1, 0x73, 0x31, 0x07, 0xb9, 0xba, 0x08, 0x30, 0x3b, 0x39, 0x47, 0x39, 0xa6, - 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0x83, 0x5d, 0x9b, 0x54, 0x9a, 0x06, 0x61, 0x24, 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, - 0xeb, 0x83, 0xec, 0x4a, 0x49, 0x2c, 0x49, 0x84, 0x78, 0xca, 0x1a, 0x6e, 0x3f, 0x20, 0x00, 0x00, - 0xff, 0xff, 0x49, 0x3b, 0x52, 0x44, 0xec, 0x00, 0x00, 0x00, -} diff --git 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto deleted file mode 100644 index ec5b431e..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; - -package multitest; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; - -message Multi2 { - required int32 required_value = 1; - - enum Color { - BLUE = 1; - GREEN = 2; - RED = 3; - }; - optional Color color = 2; -} - diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go deleted file mode 100644 index f03c350a..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: multi/multi3.proto - -package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Multi3_HatType int32 - -const ( - Multi3_FEDORA Multi3_HatType = 1 - Multi3_FEZ Multi3_HatType = 2 -) - -var Multi3_HatType_name = map[int32]string{ - 1: "FEDORA", - 2: "FEZ", -} -var Multi3_HatType_value = map[string]int32{ - "FEDORA": 1, - "FEZ": 2, -} - -func (x Multi3_HatType) Enum() *Multi3_HatType { - p := new(Multi3_HatType) - *p = x - return p -} -func (x Multi3_HatType) String() string { - return proto.EnumName(Multi3_HatType_name, int32(x)) -} -func (x *Multi3_HatType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Multi3_HatType_value, data, "Multi3_HatType") - if err != nil { - return err - } - *x = Multi3_HatType(value) - return nil -} -func (Multi3_HatType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_multi3_d55a72b4628b7875, []int{0, 0} -} - -type Multi3 struct { - HatType *Multi3_HatType `protobuf:"varint,1,opt,name=hat_type,json=hatType,enum=multitest.Multi3_HatType" json:"hat_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Multi3) Reset() { *m = Multi3{} } -func (m *Multi3) String() string { return proto.CompactTextString(m) } -func (*Multi3) ProtoMessage() {} -func (*Multi3) Descriptor() ([]byte, []int) { - return fileDescriptor_multi3_d55a72b4628b7875, []int{0} -} -func (m *Multi3) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Multi3.Unmarshal(m, b) -} -func (m *Multi3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Multi3.Marshal(b, m, deterministic) -} -func (dst *Multi3) XXX_Merge(src proto.Message) { - xxx_messageInfo_Multi3.Merge(dst, src) -} -func (m *Multi3) XXX_Size() int { - return xxx_messageInfo_Multi3.Size(m) -} -func (m *Multi3) XXX_DiscardUnknown() { - xxx_messageInfo_Multi3.DiscardUnknown(m) -} - -var xxx_messageInfo_Multi3 proto.InternalMessageInfo - -func (m 
*Multi3) GetHatType() Multi3_HatType { - if m != nil && m.HatType != nil { - return *m.HatType - } - return Multi3_FEDORA -} - -func init() { - proto.RegisterType((*Multi3)(nil), "multitest.Multi3") - proto.RegisterEnum("multitest.Multi3_HatType", Multi3_HatType_name, Multi3_HatType_value) -} - -func init() { proto.RegisterFile("multi/multi3.proto", fileDescriptor_multi3_d55a72b4628b7875) } - -var fileDescriptor_multi3_d55a72b4628b7875 = []byte{ - // 170 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, - 0xc9, 0xd4, 0x07, 0x93, 0xc6, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, - 0x6a, 0x71, 0x89, 0x52, 0x1c, 0x17, 0x9b, 0x2f, 0x58, 0x4a, 0xc8, 0x84, 0x8b, 0x23, 0x23, 0xb1, - 0x24, 0xbe, 0xa4, 0xb2, 0x20, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0xcf, 0x48, 0x52, 0x0f, 0xae, - 0x4e, 0x0f, 0xa2, 0x48, 0xcf, 0x23, 0xb1, 0x24, 0xa4, 0xb2, 0x20, 0x35, 0x88, 0x3d, 0x03, 0xc2, - 0x50, 0x92, 0xe3, 0x62, 0x87, 0x8a, 0x09, 0x71, 0x71, 0xb1, 0xb9, 0xb9, 0xba, 0xf8, 0x07, 0x39, - 0x0a, 0x30, 0x0a, 0xb1, 0x73, 0x31, 0xbb, 0xb9, 0x46, 0x09, 0x30, 0x39, 0x39, 0x47, 0x39, 0xa6, - 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0x83, 0x5d, 0x91, 0x54, 0x9a, 0x06, 0x61, 0x24, 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, - 0xeb, 0x83, 0x2c, 0x4a, 0x49, 0x2c, 0x49, 0x84, 0x38, 0xd6, 0x1a, 0x6e, 0x39, 0x20, 0x00, 0x00, - 0xff, 0xff, 0xd5, 0xa4, 0x1a, 0x0e, 0xc4, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto deleted file mode 100644 index 8690b881..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto2"; - -package multitest; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; - -message Multi3 { - enum HatType { - FEDORA = 1; - FEZ = 2; - }; - optional HatType hat_type = 1; -} - diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go deleted file mode 100644 index a033f8b0..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go +++ /dev/null @@ -1,1192 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: my_test/test.proto - -package test // import "github.com/golang/protobuf/protoc-gen-go/testdata/my_test" - -/* -This package holds interesting messages. -*/ - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type HatType int32 - -const ( - // deliberately skipping 0 - HatType_FEDORA HatType = 1 - HatType_FEZ HatType = 2 -) - -var HatType_name = map[int32]string{ - 1: "FEDORA", - 2: "FEZ", -} -var HatType_value = map[string]int32{ - "FEDORA": 1, - "FEZ": 2, -} - -func (x HatType) Enum() *HatType { - p := new(HatType) - *p = x - return p -} -func (x HatType) String() string { - return proto.EnumName(HatType_name, int32(x)) -} -func (x *HatType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") - if err != nil { - return err - } - *x = HatType(value) - return nil -} -func (HatType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{0} -} - -// This enum represents days of the week. -type Days int32 - -const ( - Days_MONDAY Days = 1 - Days_TUESDAY Days = 2 - Days_LUNDI Days = 1 -) - -var Days_name = map[int32]string{ - 1: "MONDAY", - 2: "TUESDAY", - // Duplicate value: 1: "LUNDI", -} -var Days_value = map[string]int32{ - "MONDAY": 1, - "TUESDAY": 2, - "LUNDI": 1, -} - -func (x Days) Enum() *Days { - p := new(Days) - *p = x - return p -} -func (x Days) String() string { - return proto.EnumName(Days_name, int32(x)) -} -func (x *Days) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") - if err != nil { - return err - } - *x = Days(value) - return nil -} -func (Days) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{1} -} - -type Request_Color int32 - -const ( - Request_RED Request_Color = 0 - Request_GREEN Request_Color = 1 - Request_BLUE Request_Color = 2 -) - -var Request_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Request_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Request_Color) Enum() *Request_Color { - p := new(Request_Color) - *p = x - 
return p -} -func (x Request_Color) String() string { - return proto.EnumName(Request_Color_name, int32(x)) -} -func (x *Request_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") - if err != nil { - return err - } - *x = Request_Color(value) - return nil -} -func (Request_Color) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{0, 0} -} - -type Reply_Entry_Game int32 - -const ( - Reply_Entry_FOOTBALL Reply_Entry_Game = 1 - Reply_Entry_TENNIS Reply_Entry_Game = 2 -) - -var Reply_Entry_Game_name = map[int32]string{ - 1: "FOOTBALL", - 2: "TENNIS", -} -var Reply_Entry_Game_value = map[string]int32{ - "FOOTBALL": 1, - "TENNIS": 2, -} - -func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { - p := new(Reply_Entry_Game) - *p = x - return p -} -func (x Reply_Entry_Game) String() string { - return proto.EnumName(Reply_Entry_Game_name, int32(x)) -} -func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") - if err != nil { - return err - } - *x = Reply_Entry_Game(value) - return nil -} -func (Reply_Entry_Game) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{1, 0, 0} -} - -// This is a message that might be sent somewhere. 
-type Request struct { - Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` - // optional imp.ImportedMessage imported_message = 2; - Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` - Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` - // optional imp.ImportedMessage.Owner owner = 6; - Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` - Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` - // This is a map field. It will generate map[int32]string. - NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // This is a map field whose value type is a message. - MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` - // This field should not conflict with any getters. 
- GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request.Unmarshal(m, b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) -} -func (dst *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(dst, src) -} -func (m *Request) XXX_Size() int { - return xxx_messageInfo_Request.Size(m) -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -const Default_Request_Hat HatType = HatType_FEDORA - -var Default_Request_Deadline float32 = float32(math.Inf(1)) - -func (m *Request) GetKey() []int64 { - if m != nil { - return m.Key - } - return nil -} - -func (m *Request) GetHue() Request_Color { - if m != nil && m.Hue != nil { - return *m.Hue - } - return Request_RED -} - -func (m *Request) GetHat() HatType { - if m != nil && m.Hat != nil { - return *m.Hat - } - return Default_Request_Hat -} - -func (m *Request) GetDeadline() float32 { - if m != nil && m.Deadline != nil { - return *m.Deadline - } - return Default_Request_Deadline -} - -func (m *Request) GetSomegroup() *Request_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *Request) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *Request) GetMsgMapping() map[int64]*Reply { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m 
*Request) GetReset_() int32 { - if m != nil && m.Reset_ != nil { - return *m.Reset_ - } - return 0 -} - -func (m *Request) GetGetKey_() string { - if m != nil && m.GetKey_ != nil { - return *m.GetKey_ - } - return "" -} - -type Request_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } -func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*Request_SomeGroup) ProtoMessage() {} -func (*Request_SomeGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{0, 0} -} -func (m *Request_SomeGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request_SomeGroup.Unmarshal(m, b) -} -func (m *Request_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request_SomeGroup.Marshal(b, m, deterministic) -} -func (dst *Request_SomeGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request_SomeGroup.Merge(dst, src) -} -func (m *Request_SomeGroup) XXX_Size() int { - return xxx_messageInfo_Request_SomeGroup.Size(m) -} -func (m *Request_SomeGroup) XXX_DiscardUnknown() { - xxx_messageInfo_Request_SomeGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_Request_SomeGroup proto.InternalMessageInfo - -func (m *Request_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Reply struct { - Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` - CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - 
-func (m *Reply) Reset() { *m = Reply{} } -func (m *Reply) String() string { return proto.CompactTextString(m) } -func (*Reply) ProtoMessage() {} -func (*Reply) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{1} -} - -var extRange_Reply = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_Reply -} -func (m *Reply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Reply.Unmarshal(m, b) -} -func (m *Reply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reply.Marshal(b, m, deterministic) -} -func (dst *Reply) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reply.Merge(dst, src) -} -func (m *Reply) XXX_Size() int { - return xxx_messageInfo_Reply.Size(m) -} -func (m *Reply) XXX_DiscardUnknown() { - xxx_messageInfo_Reply.DiscardUnknown(m) -} - -var xxx_messageInfo_Reply proto.InternalMessageInfo - -func (m *Reply) GetFound() []*Reply_Entry { - if m != nil { - return m.Found - } - return nil -} - -func (m *Reply) GetCompactKeys() []int32 { - if m != nil { - return m.CompactKeys - } - return nil -} - -type Reply_Entry struct { - KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` - Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` - XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } -func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } -func (*Reply_Entry) ProtoMessage() {} -func (*Reply_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{1, 0} -} 
-func (m *Reply_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Reply_Entry.Unmarshal(m, b) -} -func (m *Reply_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reply_Entry.Marshal(b, m, deterministic) -} -func (dst *Reply_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reply_Entry.Merge(dst, src) -} -func (m *Reply_Entry) XXX_Size() int { - return xxx_messageInfo_Reply_Entry.Size(m) -} -func (m *Reply_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Reply_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_Reply_Entry proto.InternalMessageInfo - -const Default_Reply_Entry_Value int64 = 7 - -func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { - if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { - return *m.KeyThatNeeds_1234Camel_CasIng - } - return 0 -} - -func (m *Reply_Entry) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return Default_Reply_Entry_Value -} - -func (m *Reply_Entry) GetXMyFieldName_2() int64 { - if m != nil && m.XMyFieldName_2 != nil { - return *m.XMyFieldName_2 - } - return 0 -} - -type OtherBase struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OtherBase) Reset() { *m = OtherBase{} } -func (m *OtherBase) String() string { return proto.CompactTextString(m) } -func (*OtherBase) ProtoMessage() {} -func (*OtherBase) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{2} -} - -var extRange_OtherBase = []proto.ExtensionRange{ - {Start: 100, End: 536870911}, -} - -func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OtherBase -} -func (m *OtherBase) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OtherBase.Unmarshal(m, b) -} -func (m *OtherBase) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_OtherBase.Marshal(b, m, deterministic) -} -func (dst *OtherBase) XXX_Merge(src proto.Message) { - xxx_messageInfo_OtherBase.Merge(dst, src) -} -func (m *OtherBase) XXX_Size() int { - return xxx_messageInfo_OtherBase.Size(m) -} -func (m *OtherBase) XXX_DiscardUnknown() { - xxx_messageInfo_OtherBase.DiscardUnknown(m) -} - -var xxx_messageInfo_OtherBase proto.InternalMessageInfo - -func (m *OtherBase) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type ReplyExtensions struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } -func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } -func (*ReplyExtensions) ProtoMessage() {} -func (*ReplyExtensions) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{3} -} -func (m *ReplyExtensions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplyExtensions.Unmarshal(m, b) -} -func (m *ReplyExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplyExtensions.Marshal(b, m, deterministic) -} -func (dst *ReplyExtensions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplyExtensions.Merge(dst, src) -} -func (m *ReplyExtensions) XXX_Size() int { - return xxx_messageInfo_ReplyExtensions.Size(m) -} -func (m *ReplyExtensions) XXX_DiscardUnknown() { - xxx_messageInfo_ReplyExtensions.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplyExtensions proto.InternalMessageInfo - -var E_ReplyExtensions_Time = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "my.test.ReplyExtensions.time", - Tag: "fixed64,101,opt,name=time", - Filename: "my_test/test.proto", -} - -var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - 
ExtensionType: (*ReplyExtensions)(nil), - Field: 105, - Name: "my.test.ReplyExtensions.carrot", - Tag: "bytes,105,opt,name=carrot", - Filename: "my_test/test.proto", -} - -var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ - ExtendedType: (*OtherBase)(nil), - ExtensionType: (*ReplyExtensions)(nil), - Field: 101, - Name: "my.test.ReplyExtensions.donut", - Tag: "bytes,101,opt,name=donut", - Filename: "my_test/test.proto", -} - -type OtherReplyExtensions struct { - Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } -func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } -func (*OtherReplyExtensions) ProtoMessage() {} -func (*OtherReplyExtensions) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{4} -} -func (m *OtherReplyExtensions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OtherReplyExtensions.Unmarshal(m, b) -} -func (m *OtherReplyExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OtherReplyExtensions.Marshal(b, m, deterministic) -} -func (dst *OtherReplyExtensions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OtherReplyExtensions.Merge(dst, src) -} -func (m *OtherReplyExtensions) XXX_Size() int { - return xxx_messageInfo_OtherReplyExtensions.Size(m) -} -func (m *OtherReplyExtensions) XXX_DiscardUnknown() { - xxx_messageInfo_OtherReplyExtensions.DiscardUnknown(m) -} - -var xxx_messageInfo_OtherReplyExtensions proto.InternalMessageInfo - -func (m *OtherReplyExtensions) GetKey() int32 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -type OldReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache 
int32 `json:"-"` -} - -func (m *OldReply) Reset() { *m = OldReply{} } -func (m *OldReply) String() string { return proto.CompactTextString(m) } -func (*OldReply) ProtoMessage() {} -func (*OldReply) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{5} -} - -func (m *OldReply) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) -} -func (m *OldReply) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) -} - -var extRange_OldReply = []proto.ExtensionRange{ - {Start: 100, End: 2147483646}, -} - -func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OldReply -} -func (m *OldReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OldReply.Unmarshal(m, b) -} -func (m *OldReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OldReply.Marshal(b, m, deterministic) -} -func (dst *OldReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_OldReply.Merge(dst, src) -} -func (m *OldReply) XXX_Size() int { - return xxx_messageInfo_OldReply.Size(m) -} -func (m *OldReply) XXX_DiscardUnknown() { - xxx_messageInfo_OldReply.DiscardUnknown(m) -} - -var xxx_messageInfo_OldReply proto.InternalMessageInfo - -type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` - // This is a oneof, called "union". 
- // - // Types that are valid to be assigned to Union: - // *Communique_Number - // *Communique_Name - // *Communique_Data - // *Communique_TempC - // *Communique_Height - // *Communique_Today - // *Communique_Maybe - // *Communique_Delta_ - // *Communique_Msg - // *Communique_Somegroup - Union isCommunique_Union `protobuf_oneof:"union"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Communique) Reset() { *m = Communique{} } -func (m *Communique) String() string { return proto.CompactTextString(m) } -func (*Communique) ProtoMessage() {} -func (*Communique) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{6} -} -func (m *Communique) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Communique.Unmarshal(m, b) -} -func (m *Communique) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Communique.Marshal(b, m, deterministic) -} -func (dst *Communique) XXX_Merge(src proto.Message) { - xxx_messageInfo_Communique.Merge(dst, src) -} -func (m *Communique) XXX_Size() int { - return xxx_messageInfo_Communique.Size(m) -} -func (m *Communique) XXX_DiscardUnknown() { - xxx_messageInfo_Communique.DiscardUnknown(m) -} - -var xxx_messageInfo_Communique proto.InternalMessageInfo - -func (m *Communique) GetMakeMeCry() bool { - if m != nil && m.MakeMeCry != nil { - return *m.MakeMeCry - } - return false -} - -type isCommunique_Union interface { - isCommunique_Union() -} - -type Communique_Number struct { - Number int32 `protobuf:"varint,5,opt,name=number,oneof"` -} - -type Communique_Name struct { - Name string `protobuf:"bytes,6,opt,name=name,oneof"` -} - -type Communique_Data struct { - Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` -} - -type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` -} - -type Communique_Height struct { - Height float32 
`protobuf:"fixed32,9,opt,name=height,oneof"` -} - -type Communique_Today struct { - Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` -} - -type Communique_Maybe struct { - Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` -} - -type Communique_Delta_ struct { - Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` -} - -type Communique_Msg struct { - Msg *Reply `protobuf:"bytes,16,opt,name=msg,oneof"` -} - -type Communique_Somegroup struct { - Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` -} - -func (*Communique_Number) isCommunique_Union() {} - -func (*Communique_Name) isCommunique_Union() {} - -func (*Communique_Data) isCommunique_Union() {} - -func (*Communique_TempC) isCommunique_Union() {} - -func (*Communique_Height) isCommunique_Union() {} - -func (*Communique_Today) isCommunique_Union() {} - -func (*Communique_Maybe) isCommunique_Union() {} - -func (*Communique_Delta_) isCommunique_Union() {} - -func (*Communique_Msg) isCommunique_Union() {} - -func (*Communique_Somegroup) isCommunique_Union() {} - -func (m *Communique) GetUnion() isCommunique_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *Communique) GetNumber() int32 { - if x, ok := m.GetUnion().(*Communique_Number); ok { - return x.Number - } - return 0 -} - -func (m *Communique) GetName() string { - if x, ok := m.GetUnion().(*Communique_Name); ok { - return x.Name - } - return "" -} - -func (m *Communique) GetData() []byte { - if x, ok := m.GetUnion().(*Communique_Data); ok { - return x.Data - } - return nil -} - -func (m *Communique) GetTempC() float64 { - if x, ok := m.GetUnion().(*Communique_TempC); ok { - return x.TempC - } - return 0 -} - -func (m *Communique) GetHeight() float32 { - if x, ok := m.GetUnion().(*Communique_Height); ok { - return x.Height - } - return 0 -} - -func (m *Communique) GetToday() Days { - if x, ok := m.GetUnion().(*Communique_Today); ok { - return x.Today - } - return 
Days_MONDAY -} - -func (m *Communique) GetMaybe() bool { - if x, ok := m.GetUnion().(*Communique_Maybe); ok { - return x.Maybe - } - return false -} - -func (m *Communique) GetDelta() int32 { - if x, ok := m.GetUnion().(*Communique_Delta_); ok { - return x.Delta - } - return 0 -} - -func (m *Communique) GetMsg() *Reply { - if x, ok := m.GetUnion().(*Communique_Msg); ok { - return x.Msg - } - return nil -} - -func (m *Communique) GetSomegroup() *Communique_SomeGroup { - if x, ok := m.GetUnion().(*Communique_Somegroup); ok { - return x.Somegroup - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ - (*Communique_Number)(nil), - (*Communique_Name)(nil), - (*Communique_Data)(nil), - (*Communique_TempC)(nil), - (*Communique_Height)(nil), - (*Communique_Today)(nil), - (*Communique_Maybe)(nil), - (*Communique_Delta_)(nil), - (*Communique_Msg)(nil), - (*Communique_Somegroup)(nil), - } -} - -func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - b.EncodeVarint(5<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Number)) - case *Communique_Name: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Name) - case *Communique_Data: - b.EncodeVarint(7<<3 | proto.WireBytes) - b.EncodeRawBytes(x.Data) - case *Communique_TempC: - b.EncodeVarint(8<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.TempC)) - case *Communique_Height: - b.EncodeVarint(9<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.Height))) - case *Communique_Today: - b.EncodeVarint(10<<3 | proto.WireVarint) - 
b.EncodeVarint(uint64(x.Today)) - case *Communique_Maybe: - t := uint64(0) - if x.Maybe { - t = 1 - } - b.EncodeVarint(11<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Communique_Delta_: - b.EncodeVarint(12<<3 | proto.WireVarint) - b.EncodeZigzag32(uint64(x.Delta)) - case *Communique_Msg: - b.EncodeVarint(16<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Msg); err != nil { - return err - } - case *Communique_Somegroup: - b.EncodeVarint(14<<3 | proto.WireStartGroup) - if err := b.Marshal(x.Somegroup); err != nil { - return err - } - b.EncodeVarint(14<<3 | proto.WireEndGroup) - case nil: - default: - return fmt.Errorf("Communique.Union has unexpected type %T", x) - } - return nil -} - -func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Communique) - switch tag { - case 5: // union.number - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Number{int32(x)} - return true, err - case 6: // union.name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Communique_Name{x} - return true, err - case 7: // union.data - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Communique_Data{x} - return true, err - case 8: // union.temp_c - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Communique_TempC{math.Float64frombits(x)} - return true, err - case 9: // union.height - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Communique_Height{math.Float32frombits(uint32(x))} - return true, err - case 10: // union.today - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = 
&Communique_Today{Days(x)} - return true, err - case 11: // union.maybe - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Maybe{x != 0} - return true, err - case 12: // union.delta - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag32() - m.Union = &Communique_Delta_{int32(x)} - return true, err - case 16: // union.msg - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Reply) - err := b.DecodeMessage(msg) - m.Union = &Communique_Msg{msg} - return true, err - case 14: // union.somegroup - if wire != proto.WireStartGroup { - return true, proto.ErrInternalBadWireType - } - msg := new(Communique_SomeGroup) - err := b.DecodeGroup(msg) - m.Union = &Communique_Somegroup{msg} - return true, err - default: - return false, nil - } -} - -func _Communique_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.Number)) - case *Communique_Name: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Name))) - n += len(x.Name) - case *Communique_Data: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Data))) - n += len(x.Data) - case *Communique_TempC: - n += 1 // tag and wire - n += 8 - case *Communique_Height: - n += 1 // tag and wire - n += 4 - case *Communique_Today: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.Today)) - case *Communique_Maybe: - n += 1 // tag and wire - n += 1 - case *Communique_Delta_: - n += 1 // tag and wire - n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) - case *Communique_Msg: - s := proto.Size(x.Msg) - n += 2 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *Communique_Somegroup: - n += 1 // tag and wire - n += proto.Size(x.Somegroup) - n += 1 // tag and 
wire - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Communique_SomeGroup struct { - Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } -func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*Communique_SomeGroup) ProtoMessage() {} -func (*Communique_SomeGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{6, 0} -} -func (m *Communique_SomeGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Communique_SomeGroup.Unmarshal(m, b) -} -func (m *Communique_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Communique_SomeGroup.Marshal(b, m, deterministic) -} -func (dst *Communique_SomeGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_Communique_SomeGroup.Merge(dst, src) -} -func (m *Communique_SomeGroup) XXX_Size() int { - return xxx_messageInfo_Communique_SomeGroup.Size(m) -} -func (m *Communique_SomeGroup) XXX_DiscardUnknown() { - xxx_messageInfo_Communique_SomeGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_Communique_SomeGroup proto.InternalMessageInfo - -func (m *Communique_SomeGroup) GetMember() string { - if m != nil && m.Member != nil { - return *m.Member - } - return "" -} - -type Communique_Delta struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } -func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } -func (*Communique_Delta) ProtoMessage() {} -func (*Communique_Delta) Descriptor() ([]byte, []int) { - return fileDescriptor_test_2309d445eee26af7, []int{6, 1} -} -func (m 
*Communique_Delta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Communique_Delta.Unmarshal(m, b) -} -func (m *Communique_Delta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Communique_Delta.Marshal(b, m, deterministic) -} -func (dst *Communique_Delta) XXX_Merge(src proto.Message) { - xxx_messageInfo_Communique_Delta.Merge(dst, src) -} -func (m *Communique_Delta) XXX_Size() int { - return xxx_messageInfo_Communique_Delta.Size(m) -} -func (m *Communique_Delta) XXX_DiscardUnknown() { - xxx_messageInfo_Communique_Delta.DiscardUnknown(m) -} - -var xxx_messageInfo_Communique_Delta proto.InternalMessageInfo - -var E_Tag = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*string)(nil), - Field: 103, - Name: "my.test.tag", - Tag: "bytes,103,opt,name=tag", - Filename: "my_test/test.proto", -} - -var E_Donut = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*OtherReplyExtensions)(nil), - Field: 106, - Name: "my.test.donut", - Tag: "bytes,106,opt,name=donut", - Filename: "my_test/test.proto", -} - -func init() { - proto.RegisterType((*Request)(nil), "my.test.Request") - proto.RegisterMapType((map[int64]*Reply)(nil), "my.test.Request.MsgMappingEntry") - proto.RegisterMapType((map[int32]string)(nil), "my.test.Request.NameMappingEntry") - proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") - proto.RegisterType((*Reply)(nil), "my.test.Reply") - proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") - proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") - proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") - proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") - proto.RegisterType((*OldReply)(nil), "my.test.OldReply") - proto.RegisterType((*Communique)(nil), "my.test.Communique") - proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") - 
proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") - proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) - proto.RegisterEnum("my.test.Days", Days_name, Days_value) - proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) - proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) - proto.RegisterExtension(E_ReplyExtensions_Time) - proto.RegisterExtension(E_ReplyExtensions_Carrot) - proto.RegisterExtension(E_ReplyExtensions_Donut) - proto.RegisterExtension(E_Tag) - proto.RegisterExtension(E_Donut) -} - -func init() { proto.RegisterFile("my_test/test.proto", fileDescriptor_test_2309d445eee26af7) } - -var fileDescriptor_test_2309d445eee26af7 = []byte{ - // 1033 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0xce, 0xd8, 0x71, 0x7e, 0x4e, 0x42, 0x6b, 0x46, 0x55, 0x6b, 0x05, 0xed, 0xd6, 0x04, 0x8a, - 0x4c, 0xc5, 0xa6, 0xda, 0x80, 0xc4, 0x2a, 0x88, 0xd5, 0x36, 0x3f, 0x6d, 0xaa, 0x6d, 0x12, 0x69, - 0xda, 0x5e, 0xb0, 0x37, 0xd6, 0x34, 0x9e, 0x3a, 0xa6, 0x19, 0x3b, 0x6b, 0x8f, 0x11, 0xbe, 0xeb, - 0x53, 0xc0, 0x6b, 0x70, 0xcf, 0x0b, 0xf1, 0x16, 0x45, 0x33, 0x0e, 0x49, 0xda, 0xa0, 0xbd, 0xb1, - 0x7c, 0xce, 0xf9, 0xce, 0xe7, 0x39, 0x3f, 0xfe, 0x06, 0x30, 0xcf, 0x5c, 0xc1, 0x12, 0x71, 0x22, - 0x1f, 0xad, 0x45, 0x1c, 0x89, 0x08, 0x97, 0x79, 0xd6, 0x92, 0x66, 0x03, 0xf3, 0x74, 0x2e, 0x82, - 0x13, 0xf5, 0x7c, 0x9d, 0x07, 0x9b, 0xff, 0x14, 0xa1, 0x4c, 0xd8, 0xc7, 0x94, 0x25, 0x02, 0x9b, - 0xa0, 0xdf, 0xb3, 0xcc, 0x42, 0xb6, 0xee, 0xe8, 0x44, 0xbe, 0x62, 0x07, 0xf4, 0x59, 0xca, 0x2c, - 0xdd, 0x46, 0xce, 0x4e, 0x7b, 0xbf, 0xb5, 0x24, 0x6a, 0x2d, 0x13, 0x5a, 0xbd, 0x68, 0x1e, 0xc5, - 0x44, 0x42, 0xf0, 0x31, 0xe8, 0x33, 0x2a, 0xac, 0xa2, 0x42, 0x9a, 0x2b, 0xe4, 0x90, 0x8a, 0xeb, - 0x6c, 0xc1, 0x3a, 0xa5, 0xb3, 0x41, 0x7f, 0x42, 0x4e, 0x89, 0x04, 0xe1, 0x43, 0xa8, 0x78, 0x8c, - 0x7a, 0xf3, 
0x20, 0x64, 0x56, 0xd9, 0x46, 0x8e, 0xd6, 0xd1, 0x83, 0xf0, 0x8e, 0xac, 0x9c, 0xf8, - 0x0d, 0x54, 0x93, 0x88, 0x33, 0x3f, 0x8e, 0xd2, 0x85, 0x55, 0xb1, 0x91, 0x03, 0xed, 0xc6, 0xd6, - 0xc7, 0xaf, 0x22, 0xce, 0xce, 0x25, 0x82, 0xac, 0xc1, 0xb8, 0x0f, 0xf5, 0x90, 0x72, 0xe6, 0x72, - 0xba, 0x58, 0x04, 0xa1, 0x6f, 0xed, 0xd8, 0xba, 0x53, 0x6b, 0x7f, 0xb9, 0x95, 0x3c, 0xa6, 0x9c, - 0x8d, 0x72, 0xcc, 0x20, 0x14, 0x71, 0x46, 0x6a, 0xe1, 0xda, 0x83, 0x4f, 0xa1, 0xc6, 0x13, 0x7f, - 0x45, 0xb2, 0xab, 0x48, 0xec, 0x2d, 0x92, 0x51, 0xe2, 0x3f, 0xe1, 0x00, 0xbe, 0x72, 0xe0, 0x3d, - 0x30, 0x62, 0x96, 0x30, 0x61, 0xd5, 0x6d, 0xe4, 0x18, 0x24, 0x37, 0xf0, 0x01, 0x94, 0x7d, 0x26, - 0x5c, 0xd9, 0x65, 0xd3, 0x46, 0x4e, 0x95, 0x94, 0x7c, 0x26, 0xde, 0xb3, 0xac, 0xf1, 0x1d, 0x54, - 0x57, 0xf5, 0xe0, 0x43, 0xa8, 0xa9, 0x6a, 0xdc, 0xbb, 0x80, 0xcd, 0x3d, 0xab, 0xaa, 0x18, 0x40, - 0xb9, 0xce, 0xa4, 0xa7, 0xf1, 0x16, 0xcc, 0xe7, 0x05, 0xac, 0x87, 0x27, 0xc1, 0x6a, 0x78, 0x7b, - 0x60, 0xfc, 0x46, 0xe7, 0x29, 0xb3, 0x34, 0xf5, 0xa9, 0xdc, 0xe8, 0x68, 0x6f, 0x50, 0x63, 0x04, - 0xbb, 0xcf, 0xce, 0xbe, 0x99, 0x8e, 0xf3, 0xf4, 0xaf, 0x37, 0xd3, 0x6b, 0xed, 0x9d, 0x8d, 0xf2, - 0x17, 0xf3, 0x6c, 0x83, 0xae, 0x79, 0x04, 0x86, 0xda, 0x04, 0x5c, 0x06, 0x9d, 0x0c, 0xfa, 0x66, - 0x01, 0x57, 0xc1, 0x38, 0x27, 0x83, 0xc1, 0xd8, 0x44, 0xb8, 0x02, 0xc5, 0xee, 0xe5, 0xcd, 0xc0, - 0xd4, 0x9a, 0x7f, 0x6a, 0x60, 0xa8, 0x5c, 0x7c, 0x0c, 0xc6, 0x5d, 0x94, 0x86, 0x9e, 0x5a, 0xb5, - 0x5a, 0x7b, 0xef, 0x29, 0x75, 0x2b, 0xef, 0x66, 0x0e, 0xc1, 0x47, 0x50, 0x9f, 0x46, 0x7c, 0x41, - 0xa7, 0xaa, 0x6d, 0x89, 0xa5, 0xd9, 0xba, 0x63, 0x74, 0x35, 0x13, 0x91, 0xda, 0xd2, 0xff, 0x9e, - 0x65, 0x49, 0xe3, 0x2f, 0x04, 0x46, 0x5e, 0x49, 0x1f, 0x0e, 0xef, 0x59, 0xe6, 0x8a, 0x19, 0x15, - 0x6e, 0xc8, 0x98, 0x97, 0xb8, 0xaf, 0xdb, 0xdf, 0xff, 0x30, 0xa5, 0x9c, 0xcd, 0xdd, 0x1e, 0x4d, - 0x2e, 0x42, 0xdf, 0x42, 0xb6, 0xe6, 0xe8, 0xe4, 0x8b, 0x7b, 0x96, 0x5d, 0xcf, 0xa8, 0x18, 0x4b, - 0xd0, 0x0a, 0x93, 0x43, 0xf0, 0xc1, 0x66, 0xf5, 
0x7a, 0x07, 0xfd, 0xb8, 0x2c, 0x18, 0x7f, 0x03, - 0xa6, 0xcb, 0xb3, 0x7c, 0x34, 0xae, 0xda, 0xb5, 0xb6, 0xfa, 0x3f, 0x74, 0x52, 0x1f, 0x65, 0x6a, - 0x3c, 0x72, 0x34, 0xed, 0xa6, 0x0d, 0xc5, 0x73, 0xca, 0x19, 0xae, 0x43, 0xe5, 0x6c, 0x32, 0xb9, - 0xee, 0x9e, 0x5e, 0x5e, 0x9a, 0x08, 0x03, 0x94, 0xae, 0x07, 0xe3, 0xf1, 0xc5, 0x95, 0xa9, 0x1d, - 0x57, 0x2a, 0x9e, 0xf9, 0xf0, 0xf0, 0xf0, 0xa0, 0x35, 0xbf, 0x85, 0xea, 0x44, 0xcc, 0x58, 0xdc, - 0xa5, 0x09, 0xc3, 0x18, 0x8a, 0x92, 0x56, 0x8d, 0xa2, 0x4a, 0xd4, 0xfb, 0x06, 0xf4, 0x6f, 0x04, - 0xbb, 0xaa, 0x4b, 0x83, 0xdf, 0x05, 0x0b, 0x93, 0x20, 0x0a, 0x93, 0x76, 0x13, 0x8a, 0x22, 0xe0, - 0x0c, 0x3f, 0x1b, 0x91, 0xc5, 0x6c, 0xe4, 0x20, 0xa2, 0x62, 0xed, 0x77, 0x50, 0x9a, 0xd2, 0x38, - 0x8e, 0xc4, 0x16, 0x2a, 0x50, 0xe3, 0xb5, 0x9e, 0x7a, 0xd7, 0xec, 0x64, 0x99, 0xd7, 0xee, 0x82, - 0xe1, 0x45, 0x61, 0x2a, 0x30, 0x5e, 0x41, 0x57, 0x87, 0x56, 0x9f, 0xfa, 0x14, 0x49, 0x9e, 0xda, - 0x74, 0x60, 0x4f, 0xe5, 0x3c, 0x0b, 0x6f, 0x2f, 0x6f, 0xd3, 0x82, 0xca, 0x64, 0xee, 0x29, 0x9c, - 0xaa, 0xfe, 0xf1, 0xf1, 0xf1, 0xb1, 0xdc, 0xd1, 0x2a, 0xa8, 0xf9, 0x87, 0x0e, 0xd0, 0x8b, 0x38, - 0x4f, 0xc3, 0xe0, 0x63, 0xca, 0xf0, 0x4b, 0xa8, 0x71, 0x7a, 0xcf, 0x5c, 0xce, 0xdc, 0x69, 0x9c, - 0x53, 0x54, 0x48, 0x55, 0xba, 0x46, 0xac, 0x17, 0x67, 0xd8, 0x82, 0x52, 0x98, 0xf2, 0x5b, 0x16, - 0x5b, 0x86, 0x64, 0x1f, 0x16, 0xc8, 0xd2, 0xc6, 0x7b, 0xcb, 0x46, 0x97, 0x64, 0xa3, 0x87, 0x85, - 0xbc, 0xd5, 0xd2, 0xeb, 0x51, 0x41, 0x95, 0x30, 0xd5, 0xa5, 0x57, 0x5a, 0xf8, 0x00, 0x4a, 0x82, - 0xf1, 0x85, 0x3b, 0x55, 0x72, 0x84, 0x86, 0x05, 0x62, 0x48, 0xbb, 0x27, 0xe9, 0x67, 0x2c, 0xf0, - 0x67, 0x42, 0xfd, 0xa6, 0x9a, 0xa4, 0xcf, 0x6d, 0x7c, 0x04, 0x86, 0x88, 0x3c, 0x9a, 0x59, 0xa0, - 0x34, 0xf1, 0xb3, 0x55, 0x6f, 0xfa, 0x34, 0x4b, 0x14, 0x81, 0x8c, 0xe2, 0x7d, 0x30, 0x38, 0xcd, - 0x6e, 0x99, 0x55, 0x93, 0x27, 0x97, 0x7e, 0x65, 0x4a, 0xbf, 0xc7, 0xe6, 0x82, 0x2a, 0x01, 0xf9, - 0x5c, 0xfa, 0x95, 0x89, 0x9b, 0xa0, 0xf3, 0xc4, 0x57, 0xf2, 0xb1, 0xf5, 0x53, 0x0e, 
0x0b, 0x44, - 0x06, 0xf1, 0xcf, 0x9b, 0xfa, 0xb9, 0xa3, 0xf4, 0xf3, 0xc5, 0x0a, 0xb9, 0xee, 0xdd, 0x5a, 0x42, - 0x87, 0x85, 0x0d, 0x11, 0x6d, 0x7c, 0xb5, 0x29, 0x46, 0xfb, 0x50, 0xe2, 0x4c, 0xf5, 0x6f, 0x37, - 0x57, 0xac, 0xdc, 0x6a, 0x94, 0xc1, 0xe8, 0xcb, 0x03, 0x75, 0xcb, 0x60, 0xa4, 0x61, 0x10, 0x85, - 0xc7, 0x2f, 0xa1, 0xbc, 0x94, 0x7b, 0xb9, 0xe6, 0xb9, 0xe0, 0x9b, 0x48, 0x8a, 0xc2, 0xd9, 0xe0, - 0x83, 0xa9, 0x1d, 0xb7, 0xa0, 0x28, 0x4b, 0x97, 0xc1, 0xd1, 0x64, 0xdc, 0x3f, 0xfd, 0xc5, 0x44, - 0xb8, 0x06, 0xe5, 0xeb, 0x9b, 0xc1, 0x95, 0x34, 0x34, 0xa9, 0x1a, 0x97, 0x37, 0xe3, 0xfe, 0x85, - 0x89, 0x1a, 0x9a, 0x89, 0x3a, 0x36, 0xe8, 0x82, 0xfa, 0x5b, 0xfb, 0xea, 0xab, 0x63, 0xc8, 0x50, - 0xa7, 0xf7, 0xdf, 0x4a, 0x3e, 0xc7, 0xfc, 0xaa, 0xba, 0xf3, 0xe2, 0xe9, 0xa2, 0xfe, 0xff, 0x4e, - 0x76, 0xdf, 0x7d, 0x78, 0xeb, 0x07, 0x62, 0x96, 0xde, 0xb6, 0xa6, 0x11, 0x3f, 0xf1, 0xa3, 0x39, - 0x0d, 0xfd, 0x13, 0x75, 0x39, 0xde, 0xa6, 0x77, 0xf9, 0xcb, 0xf4, 0x95, 0xcf, 0xc2, 0x57, 0x7e, - 0xa4, 0x6e, 0x55, 0xb9, 0x0f, 0x27, 0xcb, 0x6b, 0xf6, 0x27, 0xf9, 0xf8, 0x37, 0x00, 0x00, 0xff, - 0xff, 0x12, 0xd5, 0x46, 0x00, 0x75, 0x07, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto deleted file mode 100644 index 1ef3fd02..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto +++ /dev/null @@ -1,158 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; - -// This package holds interesting messages. -package my.test; // dotted package name - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/my_test;test"; - -//import "imp.proto"; -import "multi/multi1.proto"; // unused import - -enum HatType { - // deliberately skipping 0 - FEDORA = 1; - FEZ = 2; -} - -// This enum represents days of the week. -enum Days { - option allow_alias = true; - - MONDAY = 1; - TUESDAY = 2; - LUNDI = 1; // same value as MONDAY -} - -// This is a message that might be sent somewhere. 
-message Request { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - repeated int64 key = 1; -// optional imp.ImportedMessage imported_message = 2; - optional Color hue = 3; // no default - optional HatType hat = 4 [default=FEDORA]; -// optional imp.ImportedMessage.Owner owner = 6; - optional float deadline = 7 [default=inf]; - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // These foreign types are in imp2.proto, - // which is publicly imported by imp.proto. -// optional imp.PubliclyImportedMessage pub = 10; -// optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR]; - - - // This is a map field. It will generate map[int32]string. - map name_mapping = 14; - // This is a map field whose value type is a message. - map msg_mapping = 15; - - optional int32 reset = 12; - // This field should not conflict with any getters. - optional string get_key = 16; -} - -message Reply { - message Entry { - required int64 key_that_needs_1234camel_CasIng = 1; - optional int64 value = 2 [default=7]; - optional int64 _my_field_name_2 = 3; - enum Game { - FOOTBALL = 1; - TENNIS = 2; - } - } - repeated Entry found = 1; - repeated int32 compact_keys = 2 [packed=true]; - extensions 100 to max; -} - -message OtherBase { - optional string name = 1; - extensions 100 to max; -} - -message ReplyExtensions { - extend Reply { - optional double time = 101; - optional ReplyExtensions carrot = 105; - } - extend OtherBase { - optional ReplyExtensions donut = 101; - } -} - -message OtherReplyExtensions { - optional int32 key = 1; -} - -// top-level extension -extend Reply { - optional string tag = 103; - optional OtherReplyExtensions donut = 106; -// optional imp.ImportedMessage elephant = 107; // extend with message from another file. -} - -message OldReply { - // Extensions will be encoded in MessageSet wire format. 
- option message_set_wire_format = true; - extensions 100 to max; -} - -message Communique { - optional bool make_me_cry = 1; - - // This is a oneof, called "union". - oneof union { - int32 number = 5; - string name = 6; - bytes data = 7; - double temp_c = 8; - float height = 9; - Days today = 10; - bool maybe = 11; - sint32 delta = 12; // name will conflict with Delta below - Reply msg = 16; // requires two bytes to encode field tag - group SomeGroup = 14 { - optional string member = 15; - } - } - - message Delta {} -} - diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go deleted file mode 100644 index 1ad010a1..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go +++ /dev/null @@ -1,196 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto3/proto3.proto - -package proto3 // import "github.com/golang/protobuf/protoc-gen-go/testdata/proto3" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Request_Flavour int32 - -const ( - Request_SWEET Request_Flavour = 0 - Request_SOUR Request_Flavour = 1 - Request_UMAMI Request_Flavour = 2 - Request_GOPHERLICIOUS Request_Flavour = 3 -) - -var Request_Flavour_name = map[int32]string{ - 0: "SWEET", - 1: "SOUR", - 2: "UMAMI", - 3: "GOPHERLICIOUS", -} -var Request_Flavour_value = map[string]int32{ - "SWEET": 0, - "SOUR": 1, - "UMAMI": 2, - "GOPHERLICIOUS": 3, -} - -func (x Request_Flavour) String() string { - return proto.EnumName(Request_Flavour_name, int32(x)) -} -func (Request_Flavour) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_proto3_a752e09251f17e01, []int{0, 0} -} - -type Request struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Key []int64 `protobuf:"varint,2,rep,packed,name=key,proto3" json:"key,omitempty"` - Taste Request_Flavour `protobuf:"varint,3,opt,name=taste,proto3,enum=proto3.Request_Flavour" json:"taste,omitempty"` - Book *Book `protobuf:"bytes,4,opt,name=book,proto3" json:"book,omitempty"` - Unpacked []int64 `protobuf:"varint,5,rep,name=unpacked,proto3" json:"unpacked,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_a752e09251f17e01, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request.Unmarshal(m, b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) -} -func (dst *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(dst, src) -} -func (m *Request) XXX_Size() int { - return 
xxx_messageInfo_Request.Size(m) -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -func (m *Request) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Request) GetKey() []int64 { - if m != nil { - return m.Key - } - return nil -} - -func (m *Request) GetTaste() Request_Flavour { - if m != nil { - return m.Taste - } - return Request_SWEET -} - -func (m *Request) GetBook() *Book { - if m != nil { - return m.Book - } - return nil -} - -func (m *Request) GetUnpacked() []int64 { - if m != nil { - return m.Unpacked - } - return nil -} - -type Book struct { - Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` - RawData []byte `protobuf:"bytes,2,opt,name=raw_data,json=rawData,proto3" json:"raw_data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Book) Reset() { *m = Book{} } -func (m *Book) String() string { return proto.CompactTextString(m) } -func (*Book) ProtoMessage() {} -func (*Book) Descriptor() ([]byte, []int) { - return fileDescriptor_proto3_a752e09251f17e01, []int{1} -} -func (m *Book) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Book.Unmarshal(m, b) -} -func (m *Book) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Book.Marshal(b, m, deterministic) -} -func (dst *Book) XXX_Merge(src proto.Message) { - xxx_messageInfo_Book.Merge(dst, src) -} -func (m *Book) XXX_Size() int { - return xxx_messageInfo_Book.Size(m) -} -func (m *Book) XXX_DiscardUnknown() { - xxx_messageInfo_Book.DiscardUnknown(m) -} - -var xxx_messageInfo_Book proto.InternalMessageInfo - -func (m *Book) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Book) GetRawData() []byte { - if m != nil { - return m.RawData - } - return nil -} - -func init() { - 
proto.RegisterType((*Request)(nil), "proto3.Request") - proto.RegisterType((*Book)(nil), "proto3.Book") - proto.RegisterEnum("proto3.Request_Flavour", Request_Flavour_name, Request_Flavour_value) -} - -func init() { proto.RegisterFile("proto3/proto3.proto", fileDescriptor_proto3_a752e09251f17e01) } - -var fileDescriptor_proto3_a752e09251f17e01 = []byte{ - // 306 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0xcf, 0x4e, 0xf2, 0x40, - 0x14, 0xc5, 0x99, 0xfe, 0xf9, 0x80, 0xfb, 0xa1, 0x19, 0xaf, 0x26, 0x8e, 0x1b, 0x33, 0x61, 0xd5, - 0x0d, 0x25, 0xc1, 0x85, 0xc6, 0xb8, 0x11, 0x45, 0x25, 0x91, 0x60, 0x06, 0x89, 0x89, 0x1b, 0x33, - 0x85, 0xb1, 0x92, 0x42, 0x07, 0xcb, 0x54, 0xe2, 0xcb, 0xfa, 0x2c, 0xa6, 0x9d, 0xe2, 0xea, 0x9e, - 0x7b, 0xe7, 0xe4, 0x77, 0x32, 0x07, 0x0e, 0xd7, 0x99, 0x36, 0xfa, 0xac, 0x6b, 0x47, 0x58, 0x0e, - 0xfc, 0x67, 0xb7, 0xf6, 0x0f, 0x81, 0xba, 0x50, 0x9f, 0xb9, 0xda, 0x18, 0x44, 0xf0, 0x52, 0xb9, - 0x52, 0x8c, 0x70, 0x12, 0x34, 0x45, 0xa9, 0x91, 0x82, 0x9b, 0xa8, 0x6f, 0xe6, 0x70, 0x37, 0x70, - 0x45, 0x21, 0xb1, 0x03, 0xbe, 0x91, 0x1b, 0xa3, 0x98, 0xcb, 0x49, 0xb0, 0xdf, 0x3b, 0x0e, 0x2b, - 0x6e, 0x45, 0x09, 0xef, 0x96, 0xf2, 0x4b, 0xe7, 0x99, 0xb0, 0x2e, 0xe4, 0xe0, 0x45, 0x5a, 0x27, - 0xcc, 0xe3, 0x24, 0xf8, 0xdf, 0x6b, 0xed, 0xdc, 0x7d, 0xad, 0x13, 0x51, 0xbe, 0xe0, 0x29, 0x34, - 0xf2, 0x74, 0x2d, 0x67, 0x89, 0x9a, 0x33, 0xbf, 0xc8, 0xe9, 0x3b, 0xb4, 0x26, 0xfe, 0x6e, 0xed, - 0x2b, 0xa8, 0x57, 0x4c, 0x6c, 0x82, 0x3f, 0x79, 0x19, 0x0c, 0x9e, 0x69, 0x0d, 0x1b, 0xe0, 0x4d, - 0xc6, 0x53, 0x41, 0x49, 0x71, 0x9c, 0x8e, 0xae, 0x47, 0x43, 0xea, 0xe0, 0x01, 0xec, 0xdd, 0x8f, - 0x9f, 0x1e, 0x06, 0xe2, 0x71, 0x78, 0x33, 0x1c, 0x4f, 0x27, 0xd4, 0x6d, 0x9f, 0x83, 0x57, 0x64, - 0xe1, 0x11, 0xf8, 0x66, 0x61, 0x96, 0xbb, 0xdf, 0xd9, 0x05, 0x4f, 0xa0, 0x91, 0xc9, 0xed, 0xdb, - 0x5c, 0x1a, 0xc9, 0x1c, 0x4e, 0x82, 0x96, 0xa8, 0x67, 0x72, 0x7b, 0x2b, 0x8d, 0xec, 0x5f, 0xbe, - 0x5e, 0xc4, 0x0b, 0xf3, 
0x91, 0x47, 0xe1, 0x4c, 0xaf, 0xba, 0xb1, 0x5e, 0xca, 0x34, 0xb6, 0x1d, - 0x46, 0xf9, 0xbb, 0x15, 0xb3, 0x4e, 0xac, 0xd2, 0x4e, 0xac, 0xbb, 0x46, 0x6d, 0x4c, 0xc1, 0xa8, - 0x3a, 0x8e, 0xaa, 0x76, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xec, 0x71, 0xee, 0xdb, 0x7b, 0x01, - 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto deleted file mode 100644 index 79954e4e..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto +++ /dev/null @@ -1,55 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package proto3; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/proto3"; - -message Request { - enum Flavour { - SWEET = 0; - SOUR = 1; - UMAMI = 2; - GOPHERLICIOUS = 3; - } - string name = 1; - repeated int64 key = 2; - Flavour taste = 3; - Book book = 4; - repeated int64 unpacked = 5 [packed=false]; -} - -message Book { - string title = 1; - bytes raw_data = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 70276e8f..e729dcff 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -1,141 +1,165 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - import ( "fmt" - "reflect" "strings" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" ) -const googleApis = "type.googleapis.com/" +const urlPrefix = "type.googleapis.com/" -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. 
-func AnyMessageName(any *any.Any) (string, error) { +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { if any == nil { return "", fmt.Errorf("message is nil") } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } - return any.TypeUrl[slash+1:], nil + return name, nil } -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) +// MarshalAny marshals the given message m into an anypb.Any message. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) if err != nil { return nil, err } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil } -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) if err != nil { return nil, err } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err } - return reflect.New(t.Elem()).Interface().(proto.Message), nil + return proto.MessageV1(mt.New().Interface()), nil } -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. // -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. 
+func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { var err error - d.Message, err = Empty(any) + dm.Message, err = Empty(any) if err != nil { return err } } - return UnmarshalAny(any, d.Message) + m = dm.Message } - aname, err := AnyMessageName(any) + anyName, err := AnyMessageName(any) if err != nil { return err } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) } - return proto.Unmarshal(any.Value, pb) + return proto.Unmarshal(any.Value, m) } -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { +// Is reports whether the Any message contains a message of the specified type. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { return false } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index e3c56d3f..0ef27d33 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,191 +1,62 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/protobuf/any.proto +// source: github.com/golang/protobuf/ptypes/any/any.proto -package any // import "github.com/golang/protobuf/ptypes/any" +package any -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/any.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type Any = anypb.Any -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... 
-// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. - // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. 
- // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_any_744b9ca530f228db, []int{0} -} -func (*Any) XXX_WellKnownType() string { return "Any" } -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (dst *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(dst, src) +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } - -var fileDescriptor_any_744b9ca530f228db = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 
0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index c7486676..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,149 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. - // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. 
- bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go deleted file mode 100644 index 871c6de1..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/any_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package ptypes - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/golang/protobuf/ptypes/any" -) - -func TestMarshalUnmarshal(t *testing.T) { - orig := &any.Any{Value: []byte("test")} - - packed, err := MarshalAny(orig) - if err != nil { - t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) - } - - unpacked := &any.Any{} - err = UnmarshalAny(packed, unpacked) - if err != nil || !proto.Equal(unpacked, orig) { - t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) - } -} - -func TestIs(t *testing.T) { - a, err := MarshalAny(&pb.FileDescriptorProto{}) - if err != nil { - t.Fatal(err) - } - if Is(a, &pb.DescriptorProto{}) { - // No spurious match for message names of different length. - t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") - } - if Is(a, &pb.EnumDescriptorProto{}) { - // No spurious match for message names of equal length. - t.Error("FileDescriptorProto is not an EnumDescriptorProto, but Is says it is") - } - if !Is(a, &pb.FileDescriptorProto{}) { - t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") - } -} - -func TestIsDifferentUrlPrefixes(t *testing.T) { - m := &pb.FileDescriptorProto{} - a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} - if !Is(a, m) { - t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) - } -} - -func TestIsCornerCases(t *testing.T) { - m := &pb.FileDescriptorProto{} - if Is(nil, m) { - t.Errorf("message with nil type url incorrectly claimed to be %q", proto.MessageName(m)) - } - noPrefix := &any.Any{TypeUrl: proto.MessageName(m)} - if Is(noPrefix, m) { - t.Errorf("message with type url %q incorrectly claimed to be %q", noPrefix.TypeUrl, proto.MessageName(m)) - } - shortPrefix := &any.Any{TypeUrl: "/" + proto.MessageName(m)} - if !Is(shortPrefix, m) { - t.Errorf("message with type url %q didn't satisfy Is for type 
%q", shortPrefix.TypeUrl, proto.MessageName(m)) - } -} - -func TestUnmarshalDynamic(t *testing.T) { - want := &pb.FileDescriptorProto{Name: proto.String("foo")} - a, err := MarshalAny(want) - if err != nil { - t.Fatal(err) - } - var got DynamicAny - if err := UnmarshalAny(a, &got); err != nil { - t.Fatal(err) - } - if !proto.Equal(got.Message, want) { - t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) - } -} - -func TestEmpty(t *testing.T) { - want := &pb.FileDescriptorProto{} - a, err := MarshalAny(want) - if err != nil { - t.Fatal(err) - } - got, err := Empty(a) - if err != nil { - t.Fatal(err) - } - if !proto.Equal(got, want) { - t.Errorf("unequal empty message, got %q, want %q", got, want) - } - - // that's a valid type_url for a message which shouldn't be linked into this - // test binary. We want an error. - a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" - if _, err := Empty(a); err == nil { - t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) - } -} - -func TestEmptyCornerCases(t *testing.T) { - _, err := Empty(nil) - if err == nil { - t.Error("expected Empty for nil to fail") - } - want := &pb.FileDescriptorProto{} - noPrefix := &any.Any{TypeUrl: proto.MessageName(want)} - _, err = Empty(noPrefix) - if err == nil { - t.Errorf("expected Empty for any type %q to fail", noPrefix.TypeUrl) - } - shortPrefix := &any.Any{TypeUrl: "/" + proto.MessageName(want)} - got, err := Empty(shortPrefix) - if err != nil { - t.Errorf("Empty for any type %q failed: %s", shortPrefix.TypeUrl, err) - } - if !proto.Equal(got, want) { - t.Errorf("Empty for any type %q differs, got %q, want %q", shortPrefix.TypeUrl, got, want) - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index c0d595da..fb9edd5c 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go 
@@ -1,35 +1,6 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. -/* -Package ptypes contains code for interacting with well-known types. 
-*/ +// Package ptypes provides functionality for interacting with well-known types. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 65cb0f8e..6110ae8a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -1,102 +1,72 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - import ( "errors" "fmt" "time" - durpb "github.com/golang/protobuf/ptypes/duration" + durationpb "github.com/golang/protobuf/ptypes/duration" ) +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). 
-func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { return 0, err } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } } return d, nil } -// DurationProto converts a time.Duration to a durpb.Duration. 
-func DurationProto(d time.Duration) *durpb.Duration { +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, + return &durationpb.Duration{ + Seconds: int64(secs), Nanos: int32(nanos), } } + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index a7beb2c4..d0079ee3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,159 +1,63 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/protobuf/duration.proto +// source: github.com/golang/protobuf/ptypes/duration/duration.proto -package duration // import "github.com/golang/protobuf/ptypes/duration" +package duration -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/duration.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type Duration = durationpb.Duration -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. 
-// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. 
Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_duration_e7d612259e3f0613, []int{0} -} -func (*Duration) XXX_WellKnownType() string { return "Duration" } -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (dst *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(dst, src) +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { - proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) -} - -var fileDescriptor_duration_e7d612259e3f0613 = []byte{ - // 190 
bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil } 
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce41..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. 
-// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go deleted file mode 100644 index e00491a3..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -import ( - "math" - "testing" - "time" - - "github.com/golang/protobuf/proto" - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - minGoSeconds = math.MinInt64 / int64(1e9) - maxGoSeconds = math.MaxInt64 / int64(1e9) -) - -var durationTests = []struct { - proto *durpb.Duration - isValid bool - inRange bool - dur time.Duration -}{ - // The zero duration. 
- {&durpb.Duration{Seconds: 0, Nanos: 0}, true, true, 0}, - // Some ordinary non-zero durations. - {&durpb.Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second}, - {&durpb.Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second}, - {&durpb.Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987}, - {&durpb.Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)}, - // The largest duration representable in Go. - {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, - // The smallest duration representable in Go. - {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, - {nil, false, false, 0}, - {&durpb.Duration{Seconds: -100, Nanos: 987}, false, false, 0}, - {&durpb.Duration{Seconds: 100, Nanos: -987}, false, false, 0}, - {&durpb.Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0}, - {&durpb.Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0}, - // The largest valid duration. - {&durpb.Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0}, - // The smallest valid duration. - {&durpb.Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0}, - // The smallest invalid duration above the valid range. - {&durpb.Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0}, - // The largest invalid duration below the valid range. - {&durpb.Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0}, - // One nanosecond past the largest duration representable in Go. - {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, - // One nanosecond past the smallest duration representable in Go. - {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, - // One second past the largest duration representable in Go. 
- {&durpb.Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, - // One second past the smallest duration representable in Go. - {&durpb.Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, -} - -func TestValidateDuration(t *testing.T) { - for _, test := range durationTests { - err := validateDuration(test.proto) - gotValid := (err == nil) - if gotValid != test.isValid { - t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) - } - } -} - -func TestDuration(t *testing.T) { - for _, test := range durationTests { - got, err := Duration(test.proto) - gotOK := (err == nil) - wantOK := test.isValid && test.inRange - if gotOK != wantOK { - t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) - } - if err == nil && got != test.dur { - t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) - } - } -} - -func TestDurationProto(t *testing.T) { - for _, test := range durationTests { - if test.isValid && test.inRange { - got := DurationProto(test.dur) - if !proto.Equal(got, test.proto) { - t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go deleted file mode 100644 index a69b403c..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/empty.proto - -package empty // import "github.com/golang/protobuf/ptypes/empty" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. -type Empty struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_empty_39e6d6db0632e5b2, []int{0} -} -func (*Empty) XXX_WellKnownType() string { return "Empty" } -func (m *Empty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Empty.Unmarshal(m, b) -} -func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Empty.Marshal(b, m, deterministic) -} -func (dst *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(dst, src) -} -func (m *Empty) XXX_Size() int { - return xxx_messageInfo_Empty.Size(m) -} -func (m *Empty) XXX_DiscardUnknown() { - xxx_messageInfo_Empty.DiscardUnknown(m) -} - -var xxx_messageInfo_Empty proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") -} - -func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) } - -var 
fileDescriptor_empty_39e6d6db0632e5b2 = []byte{ - // 148 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, - 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, - 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, - 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, - 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, - 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, - 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, - 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, - 0xb7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto deleted file mode 100644 index 03cacd23..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto +++ /dev/null @@ -1,52 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/empty"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "EmptyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. 
-message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index ee6382e1..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,450 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/struct.proto - -package structpb // import "github.com/golang/protobuf/ptypes/struct" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -type NullValue int32 - -const ( - // Null value. - NullValue_NULL_VALUE NullValue = 0 -) - -var NullValue_name = map[int32]string{ - 0: "NULL_VALUE", -} -var NullValue_value = map[string]int32{ - "NULL_VALUE": 0, -} - -func (x NullValue) String() string { - return proto.EnumName(NullValue_name, int32(x)) -} -func (NullValue) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} -} -func (NullValue) XXX_WellKnownType() string { return "NullValue" } - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. 
The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -type Struct struct { - // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Struct) Reset() { *m = Struct{} } -func (m *Struct) String() string { return proto.CompactTextString(m) } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} -} -func (*Struct) XXX_WellKnownType() string { return "Struct" } -func (m *Struct) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Struct.Unmarshal(m, b) -} -func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Struct.Marshal(b, m, deterministic) -} -func (dst *Struct) XXX_Merge(src proto.Message) { - xxx_messageInfo_Struct.Merge(dst, src) -} -func (m *Struct) XXX_Size() int { - return xxx_messageInfo_Struct.Size(m) -} -func (m *Struct) XXX_DiscardUnknown() { - xxx_messageInfo_Struct.DiscardUnknown(m) -} - -var xxx_messageInfo_Struct proto.InternalMessageInfo - -func (m *Struct) GetFields() map[string]*Value { - if m != nil { - return m.Fields - } - return nil -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -type Value struct { - // The kind of value. 
- // - // Types that are valid to be assigned to Kind: - // *Value_NullValue - // *Value_NumberValue - // *Value_StringValue - // *Value_BoolValue - // *Value_StructValue - // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} -} -func (*Value) XXX_WellKnownType() string { return "Value" } -func (m *Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Value.Unmarshal(m, b) -} -func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Value.Marshal(b, m, deterministic) -} -func (dst *Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Value.Merge(dst, src) -} -func (m *Value) XXX_Size() int { - return xxx_messageInfo_Value.Size(m) -} -func (m *Value) XXX_DiscardUnknown() { - xxx_messageInfo_Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Value proto.InternalMessageInfo - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_NullValue struct { - NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Value_NumberValue struct { - NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` -} - -type Value_StringValue struct { - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Value_BoolValue struct { - BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Value_StructValue struct { - StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` -} - -type Value_ListValue struct { - ListValue 
*ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` -} - -func (*Value_NullValue) isValue_Kind() {} - -func (*Value_NumberValue) isValue_Kind() {} - -func (*Value_StringValue) isValue_Kind() {} - -func (*Value_BoolValue) isValue_Kind() {} - -func (*Value_StructValue) isValue_Kind() {} - -func (*Value_ListValue) isValue_Kind() {} - -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (m *Value) GetNullValue() NullValue { - if x, ok := m.GetKind().(*Value_NullValue); ok { - return x.NullValue - } - return NullValue_NULL_VALUE -} - -func (m *Value) GetNumberValue() float64 { - if x, ok := m.GetKind().(*Value_NumberValue); ok { - return x.NumberValue - } - return 0 -} - -func (m *Value) GetStringValue() string { - if x, ok := m.GetKind().(*Value_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *Value) GetBoolValue() bool { - if x, ok := m.GetKind().(*Value_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *Value) GetStructValue() *Struct { - if x, ok := m.GetKind().(*Value_StructValue); ok { - return x.StructValue - } - return nil -} - -func (m *Value) GetListValue() *ListValue { - if x, ok := m.GetKind().(*Value_ListValue); ok { - return x.ListValue - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ - (*Value_NullValue)(nil), - (*Value_NumberValue)(nil), - (*Value_StringValue)(nil), - (*Value_BoolValue)(nil), - (*Value_StructValue)(nil), - (*Value_ListValue)(nil), - } -} - -func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Value) - // kind - switch x := m.Kind.(type) { - case *Value_NullValue: - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.NullValue)) - case *Value_NumberValue: - b.EncodeVarint(2<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.NumberValue)) - case *Value_StringValue: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StringValue) - case *Value_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(4<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Value_StructValue: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.StructValue); err != nil { - return err - } - case *Value_ListValue: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ListValue); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Value.Kind has unexpected type %T", x) - } - return nil -} - -func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Value) - switch tag { - case 1: // kind.null_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Kind = &Value_NullValue{NullValue(x)} - return true, err - case 2: // kind.number_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Kind = &Value_NumberValue{math.Float64frombits(x)} - return true, err 
- case 3: // kind.string_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Kind = &Value_StringValue{x} - return true, err - case 4: // kind.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Kind = &Value_BoolValue{x != 0} - return true, err - case 5: // kind.struct_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Struct) - err := b.DecodeMessage(msg) - m.Kind = &Value_StructValue{msg} - return true, err - case 6: // kind.list_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ListValue) - err := b.DecodeMessage(msg) - m.Kind = &Value_ListValue{msg} - return true, err - default: - return false, nil - } -} - -func _Value_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Value) - // kind - switch x := m.Kind.(type) { - case *Value_NullValue: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.NullValue)) - case *Value_NumberValue: - n += 1 // tag and wire - n += 8 - case *Value_StringValue: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.StringValue))) - n += len(x.StringValue) - case *Value_BoolValue: - n += 1 // tag and wire - n += 1 - case *Value_StructValue: - s := proto.Size(x.StructValue) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *Value_ListValue: - s := proto.Size(x.ListValue) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -type ListValue struct { - // Repeated field of dynamically typed values. 
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListValue) Reset() { *m = ListValue{} } -func (m *ListValue) String() string { return proto.CompactTextString(m) } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} -} -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } -func (m *ListValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListValue.Unmarshal(m, b) -} -func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) -} -func (dst *ListValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListValue.Merge(dst, src) -} -func (m *ListValue) XXX_Size() int { - return xxx_messageInfo_ListValue.Size(m) -} -func (m *ListValue) XXX_DiscardUnknown() { - xxx_messageInfo_ListValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ListValue proto.InternalMessageInfo - -func (m *ListValue) GetValues() []*Value { - if m != nil { - return m.Values - } - return nil -} - -func init() { - proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") - proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") - proto.RegisterType((*Value)(nil), "google.protobuf.Value") - proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") - proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) -} - -func init() { - proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) -} - -var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 
0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, - 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, - 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, - 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, - 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, - 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, - 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, - 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, - 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, - 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, - 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, - 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, - 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, - 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, - 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, - 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, - 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, - 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, - 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, - 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, - 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, - 
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, - 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, - 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, - 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto deleted file mode 100644 index 7d7808e7..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -message Struct { - // Unordered map of dynamically typed values. - map fields = 1; -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -message Value { - // The kind of value. - oneof kind { - // Represents a null value. - NullValue null_value = 1; - // Represents a double value. - double number_value = 2; - // Represents a string value. 
- string string_value = 3; - // Represents a boolean value. - bool bool_value = 4; - // Represents a structured value. - Struct struct_value = 5; - // Represents a repeated `Value`. - ListValue list_value = 6; - } -} - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -enum NullValue { - // Null value. - NULL_VALUE = 0; -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -message ListValue { - // Repeated field of dynamically typed values. - repeated Value values = 1; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 47f10dbc..026d0d49 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -1,46 +1,18 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements operations on google.protobuf.Timestamp. - import ( "errors" "fmt" "time" - tspb "github.com/golang/protobuf/ptypes/timestamp" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" ) +// Range of google.protobuf.Duration as specified in timestamp.proto. const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). @@ -50,44 +22,18 @@ const ( maxValidSeconds = 253402300800 ) -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// Timestamp converts a timestamppb.Timestamp to a time.Time. // It returns an error if the argument is invalid. // -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time @@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { +func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { panic("ptypes: time.Now() out of Timestamp range") @@ -110,12 +56,10 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. 
// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, +func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { + ts := ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), } if err := validateTimestamp(ts); err != nil { return nil, err @@ -123,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) { return ts, nil } -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { +// TimestampString returns the RFC 3339 string for valid Timestamps. +// For invalid Timestamps, it returns an error message in parentheses. +func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { return fmt.Sprintf("(%v)", err) } return t.Format(time.RFC3339Nano) } + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true. 
+func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 8e76ae97..a76f8076 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,175 +1,64 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto -package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" +package timestamp -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/timestamp.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type Timestamp = timestamppb.Timestamp -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
-// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. 
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} -} -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (dst *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(dst, src) +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { - proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) -} - -var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 
0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index 
06750ab1..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,133 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
-// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. 
-// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go deleted file mode 100644 index 6e3c969b..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -import ( - "math" - "testing" - "time" - - "github.com/golang/protobuf/proto" - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -var tests = []struct { - ts *tspb.Timestamp - valid bool - t time.Time -}{ - // The timestamp representing the Unix epoch date. - {&tspb.Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)}, - // The smallest representable timestamp. - {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false, - time.Unix(math.MinInt64, math.MinInt32).UTC()}, - // The smallest representable timestamp with non-negative nanos. - {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()}, - // The earliest valid timestamp. - {&tspb.Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)}, - //"0001-01-01T00:00:00Z"}, - // The largest representable timestamp. - {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false, - time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, - // The largest representable timestamp with nanos in range. - {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false, - time.Unix(math.MaxInt64, 1e9-1).UTC()}, - // The largest valid timestamp. - {&tspb.Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true, - time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, - // The smallest invalid timestamp that is larger than the valid range. 
- {&tspb.Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, - // A date before the epoch. - {&tspb.Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)}, - // A date after the epoch. - {&tspb.Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)}, - // A date after the epoch, in the middle of the day. - {&tspb.Timestamp{Seconds: 1296012345, Nanos: 940483}, true, - time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, -} - -func TestValidateTimestamp(t *testing.T) { - for _, s := range tests { - got := validateTimestamp(s.ts) - if (got == nil) != s.valid { - t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) - } - } -} - -func TestTimestamp(t *testing.T) { - for _, s := range tests { - got, err := Timestamp(s.ts) - if (err == nil) != s.valid { - t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) - } else if s.valid && got != s.t { - t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) - } - } - // Special case: a nil Timestamp is an error, but returns the 0 Unix time. - got, err := Timestamp(nil) - want := time.Unix(0, 0).UTC() - if got != want { - t.Errorf("Timestamp(nil) = %v, want %v", got, want) - } - if err == nil { - t.Errorf("Timestamp(nil) error = nil, expected error") - } -} - -func TestTimestampProto(t *testing.T) { - for _, s := range tests { - got, err := TimestampProto(s.t) - if (err == nil) != s.valid { - t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) - } else if s.valid && !proto.Equal(got, s.ts) { - t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) - } - } - // No corresponding special case here: no time.Time results in a nil Timestamp. -} - -func TestTimestampString(t *testing.T) { - for _, test := range []struct { - ts *tspb.Timestamp - want string - }{ - // Not much testing needed because presumably time.Format is - // well-tested. 
- {&tspb.Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"}, - {&tspb.Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"}, - } { - got := TimestampString(test.ts) - if got != test.want { - t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) - } - } -} - -func utcDate(year, month, day int) time.Time { - return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) -} - -func TestTimestampNow(t *testing.T) { - // Bracket the expected time. - before := time.Now() - ts := TimestampNow() - after := time.Now() - - tm, err := Timestamp(ts) - if err != nil { - t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err) - } - if tm.Before(before) || tm.After(after) { - t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm) - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go deleted file mode 100644 index 0f0fa837..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/wrappers.proto - -package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Wrapper message for `double`. 
-// -// The JSON representation for `DoubleValue` is JSON number. -type DoubleValue struct { - // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleValue) Reset() { *m = DoubleValue{} } -func (m *DoubleValue) String() string { return proto.CompactTextString(m) } -func (*DoubleValue) ProtoMessage() {} -func (*DoubleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} -} -func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } -func (m *DoubleValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleValue.Unmarshal(m, b) -} -func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) -} -func (dst *DoubleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleValue.Merge(dst, src) -} -func (m *DoubleValue) XXX_Size() int { - return xxx_messageInfo_DoubleValue.Size(m) -} -func (m *DoubleValue) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleValue proto.InternalMessageInfo - -func (m *DoubleValue) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -type FloatValue struct { - // The float value. 
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FloatValue) Reset() { *m = FloatValue{} } -func (m *FloatValue) String() string { return proto.CompactTextString(m) } -func (*FloatValue) ProtoMessage() {} -func (*FloatValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} -} -func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } -func (m *FloatValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FloatValue.Unmarshal(m, b) -} -func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) -} -func (dst *FloatValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatValue.Merge(dst, src) -} -func (m *FloatValue) XXX_Size() int { - return xxx_messageInfo_FloatValue.Size(m) -} -func (m *FloatValue) XXX_DiscardUnknown() { - xxx_messageInfo_FloatValue.DiscardUnknown(m) -} - -var xxx_messageInfo_FloatValue proto.InternalMessageInfo - -func (m *FloatValue) GetValue() float32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -type Int64Value struct { - // The int64 value. 
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Int64Value) Reset() { *m = Int64Value{} } -func (m *Int64Value) String() string { return proto.CompactTextString(m) } -func (*Int64Value) ProtoMessage() {} -func (*Int64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} -} -func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } -func (m *Int64Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Int64Value.Unmarshal(m, b) -} -func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) -} -func (dst *Int64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int64Value.Merge(dst, src) -} -func (m *Int64Value) XXX_Size() int { - return xxx_messageInfo_Int64Value.Size(m) -} -func (m *Int64Value) XXX_DiscardUnknown() { - xxx_messageInfo_Int64Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Int64Value proto.InternalMessageInfo - -func (m *Int64Value) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -type UInt64Value struct { - // The uint64 value. 
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UInt64Value) Reset() { *m = UInt64Value{} } -func (m *UInt64Value) String() string { return proto.CompactTextString(m) } -func (*UInt64Value) ProtoMessage() {} -func (*UInt64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} -} -func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } -func (m *UInt64Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UInt64Value.Unmarshal(m, b) -} -func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) -} -func (dst *UInt64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt64Value.Merge(dst, src) -} -func (m *UInt64Value) XXX_Size() int { - return xxx_messageInfo_UInt64Value.Size(m) -} -func (m *UInt64Value) XXX_DiscardUnknown() { - xxx_messageInfo_UInt64Value.DiscardUnknown(m) -} - -var xxx_messageInfo_UInt64Value proto.InternalMessageInfo - -func (m *UInt64Value) GetValue() uint64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -type Int32Value struct { - // The int32 value. 
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Int32Value) Reset() { *m = Int32Value{} } -func (m *Int32Value) String() string { return proto.CompactTextString(m) } -func (*Int32Value) ProtoMessage() {} -func (*Int32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} -} -func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } -func (m *Int32Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Int32Value.Unmarshal(m, b) -} -func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) -} -func (dst *Int32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int32Value.Merge(dst, src) -} -func (m *Int32Value) XXX_Size() int { - return xxx_messageInfo_Int32Value.Size(m) -} -func (m *Int32Value) XXX_DiscardUnknown() { - xxx_messageInfo_Int32Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Int32Value proto.InternalMessageInfo - -func (m *Int32Value) GetValue() int32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -type UInt32Value struct { - // The uint32 value. 
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UInt32Value) Reset() { *m = UInt32Value{} } -func (m *UInt32Value) String() string { return proto.CompactTextString(m) } -func (*UInt32Value) ProtoMessage() {} -func (*UInt32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} -} -func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } -func (m *UInt32Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UInt32Value.Unmarshal(m, b) -} -func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) -} -func (dst *UInt32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt32Value.Merge(dst, src) -} -func (m *UInt32Value) XXX_Size() int { - return xxx_messageInfo_UInt32Value.Size(m) -} -func (m *UInt32Value) XXX_DiscardUnknown() { - xxx_messageInfo_UInt32Value.DiscardUnknown(m) -} - -var xxx_messageInfo_UInt32Value proto.InternalMessageInfo - -func (m *UInt32Value) GetValue() uint32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -type BoolValue struct { - // The bool value. 
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolValue) Reset() { *m = BoolValue{} } -func (m *BoolValue) String() string { return proto.CompactTextString(m) } -func (*BoolValue) ProtoMessage() {} -func (*BoolValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} -} -func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } -func (m *BoolValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolValue.Unmarshal(m, b) -} -func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) -} -func (dst *BoolValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolValue.Merge(dst, src) -} -func (m *BoolValue) XXX_Size() int { - return xxx_messageInfo_BoolValue.Size(m) -} -func (m *BoolValue) XXX_DiscardUnknown() { - xxx_messageInfo_BoolValue.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolValue proto.InternalMessageInfo - -func (m *BoolValue) GetValue() bool { - if m != nil { - return m.Value - } - return false -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -type StringValue struct { - // The string value. 
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringValue) Reset() { *m = StringValue{} } -func (m *StringValue) String() string { return proto.CompactTextString(m) } -func (*StringValue) ProtoMessage() {} -func (*StringValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} -} -func (*StringValue) XXX_WellKnownType() string { return "StringValue" } -func (m *StringValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringValue.Unmarshal(m, b) -} -func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) -} -func (dst *StringValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringValue.Merge(dst, src) -} -func (m *StringValue) XXX_Size() int { - return xxx_messageInfo_StringValue.Size(m) -} -func (m *StringValue) XXX_DiscardUnknown() { - xxx_messageInfo_StringValue.DiscardUnknown(m) -} - -var xxx_messageInfo_StringValue proto.InternalMessageInfo - -func (m *StringValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -type BytesValue struct { - // The bytes value. 
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesValue) Reset() { *m = BytesValue{} } -func (m *BytesValue) String() string { return proto.CompactTextString(m) } -func (*BytesValue) ProtoMessage() {} -func (*BytesValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} -} -func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } -func (m *BytesValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesValue.Unmarshal(m, b) -} -func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) -} -func (dst *BytesValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesValue.Merge(dst, src) -} -func (m *BytesValue) XXX_Size() int { - return xxx_messageInfo_BytesValue.Size(m) -} -func (m *BytesValue) XXX_DiscardUnknown() { - xxx_messageInfo_BytesValue.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesValue proto.InternalMessageInfo - -func (m *BytesValue) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") - proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") - proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") - proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") - proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") - proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") - proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") - proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") - proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") -} - -func init() { - proto.RegisterFile("google/protobuf/wrappers.proto", 
fileDescriptor_wrappers_16c7c35c009f3253) -} - -var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ - // 259 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, - 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, - 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, - 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, - 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, - 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, - 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, - 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, - 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, - 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, - 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, - 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, - 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, - 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, - 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, - 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto deleted file mode 100644 index 01947639..00000000 --- 
a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +++ /dev/null @@ -1,118 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Wrappers for primitive (non-message) types. 
These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/wrappers"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "WrappersProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -message DoubleValue { - // The double value. - double value = 1; -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -message FloatValue { - // The float value. - float value = 1; -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -message Int64Value { - // The int64 value. - int64 value = 1; -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -message UInt64Value { - // The uint64 value. - uint64 value = 1; -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -message Int32Value { - // The int32 value. - int32 value = 1; -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -message UInt32Value { - // The uint32 value. - uint32 value = 1; -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -message BoolValue { - // The bool value. - bool value = 1; -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -message StringValue { - // The string value. - string value = 1; -} - -// Wrapper message for `bytes`. 
-// -// The JSON representation for `BytesValue` is JSON string. -message BytesValue { - // The bytes value. - bytes value = 1; -} diff --git a/vendor/github.com/golang/protobuf/regenerate.sh b/vendor/github.com/golang/protobuf/regenerate.sh deleted file mode 100755 index dc7e2d1f..00000000 --- a/vendor/github.com/golang/protobuf/regenerate.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -set -e - -# Install the working tree's protoc-gen-gen in a tempdir. -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT -mkdir -p $tmpdir/bin -PATH=$tmpdir/bin:$PATH -GOBIN=$tmpdir/bin go install ./protoc-gen-go - -# Public imports require at least Go 1.9. -supportTypeAliases="" -if go list -f '{{context.ReleaseTags}}' runtime | grep -q go1.9; then - supportTypeAliases=1 -fi - -# Generate various test protos. -PROTO_DIRS=( - conformance/internal/conformance_proto - jsonpb/jsonpb_test_proto - proto - protoc-gen-go/testdata -) -for dir in ${PROTO_DIRS[@]}; do - for p in `find $dir -name "*.proto"`; do - if [[ $p == */import_public/* && ! $supportTypeAliases ]]; then - echo "# $p (skipped)" - continue; - fi - echo "# $p" - protoc -I$dir --go_out=plugins=grpc,paths=source_relative:$dir $p - done -done - -# Deriving the location of the source protos from the path to the -# protoc binary may be a bit odd, but this is what protoc itself does. -PROTO_INCLUDE=$(dirname $(dirname $(which protoc)))/include - -# Well-known types. -WKT_PROTOS=(any duration empty struct timestamp wrappers) -for p in ${WKT_PROTOS[@]}; do - echo "# google/protobuf/$p.proto" - protoc --go_out=paths=source_relative:$tmpdir google/protobuf/$p.proto - cp $tmpdir/google/protobuf/$p.pb.go ptypes/$p - cp $PROTO_INCLUDE/google/protobuf/$p.proto ptypes/$p -done - -# descriptor.proto. 
-echo "# google/protobuf/descriptor.proto" -protoc --go_out=paths=source_relative:$tmpdir google/protobuf/descriptor.proto -cp $tmpdir/google/protobuf/descriptor.pb.go protoc-gen-go/descriptor -cp $PROTO_INCLUDE/google/protobuf/descriptor.proto protoc-gen-go/descriptor diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS index bcfa1952..203e84eb 100644 --- a/vendor/github.com/golang/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -8,8 +8,10 @@ # Please keep the list sorted. +Amazon.com, Inc Damian Gryski Google Inc. Jan Mercl <0xjnml@gmail.com> +Klaus Post Rodolfo Carvalho Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index 931ae316..d9914732 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -28,7 +28,9 @@ Damian Gryski Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney Kai Backman +Klaus Post Marc-Antoine Ruel Nigel Tao Rob Pike diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.go b/vendor/github.com/golang/snappy/cmd/snappytool/main.go deleted file mode 100644 index b0f44c3f..00000000 --- a/vendor/github.com/golang/snappy/cmd/snappytool/main.go +++ /dev/null @@ -1,46 +0,0 @@ -package main - -import ( - "errors" - "flag" - "io/ioutil" - "os" - - "github.com/golang/snappy" -) - -var ( - decode = flag.Bool("d", false, "decode") - encode = flag.Bool("e", false, "encode") -) - -func run() error { - flag.Parse() - if *decode == *encode { - return errors.New("exactly one of -d or -e must be given") - } - - in, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return err - } - - out := []byte(nil) - if *decode { - out, err = snappy.Decode(nil, in) - if err != nil { - return err - } - } else { - out = snappy.Encode(nil, in) - } - _, err = os.Stdout.Write(out) - return err -} - -func main() { - if err := run(); err != nil { - os.Stderr.WriteString(err.Error() + "\n") - 
os.Exit(1) - } -} diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index 72efb035..f1e04b17 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s new file mode 100644 index 00000000..bfafa0cc --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -0,0 +1,503 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 + +loop: + // for s < len(src) + CMP R13, R6 + BEQ end + + // R4 = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + ADD R4>>2, ZR, R4 + CMPW R4, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R6, R6 + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R4 == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 + + // !!! 
Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + MOVD $16, R1 + CMP R1, R4 + BGT callMemmove + CMP R1, R2 + BLT callMemmove + CMP R1, R3 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + + VLD1 0(R6), [V0.B16] + VST1 [V0.B16], 0(R7) + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. 
+ MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // case x == 60: + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R6), R4 + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R6), R4 + B doLit + +tagLit62Plus: + MOVW $62, R1 + CMPW R1, R4 + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R6), R4 + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. 
+ +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R6), R5 + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R6), R5 + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R3 == src[s] & 0x03 + // - R4 == src[s] + MOVD $2, R1 + CMP R1, R3 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R4 == length && R4 > 0 + // - R5 == offset + + // if offset <= 0 { etc } + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt + + // if d < offset { etc } + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt + + // if length > len(dst)-d { etc } + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. 
+ // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + MOVD $16, R1 + MOVD $8, R0 + CMP R1, R4 + BGT slowForwardCopy + CMP R0, R5 + BLT slowForwardCopy + CMP R1, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. 
+ // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R14, R14 + CMP R14, R4 + BGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + MOVD $8, R1 + CMP R1, R5 + BGE fixUpSlowForwardCopy + MOVD (R15), R3 + MOVD R3, (R7) + SUB R5, R4, R4 + ADD R5, R7, R7 + ADD R5, R5, R5 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R7 being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R7 to R2 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R7, R2 + ADD R4, R7, R7 + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. 
+ MOVD $0, R1 + CMP R1, R4 + BLE loop + MOVD (R15), R3 + MOVD R3, (R2) + ADD $8, R15, R15 + ADD $8, R2, R2 + SUB $8, R4, R4 + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), R3 + MOVB R3, (R7) + ADD $1, R15, R15 + ADD $1, R7, R7 + SUB $1, R4, R4 + MOVD $0, R1 + CMP R1, R4 + BNE verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R10, R7 + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R2 + MOVD R2, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go new file mode 100644 index 00000000..7082b349 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_asm.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm +// +build amd64 arm64 + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go index 8c9f2049..2f672be5 100644 --- a/vendor/github.com/golang/snappy/decode_other.go +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy @@ -85,14 +85,28 @@ func decode(dst, src []byte) int { if offset <= 0 || d < offset || length > len(dst)-d { return decodeErrCodeCorrupt } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs // forwards, even if the slices overlap. Conceptually, this is: // // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] } + d += length } if d != len(dst) { return decodeErrCodeCorrupt diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go index 8d393e90..7f236570 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. 
type Writer struct { w io.Writer err error diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 00000000..1f565ee7 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,729 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT oneByte + MOVW $256, R2 + CMPW R2, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. 
+ MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + MOVD $64, R2 + CMP R2, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + MOVD $12, R2 + CMP R2, R3 + BGE step3 + MOVW $2048, R2 + CMPW R2, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. 
+ SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. 
+ SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. +TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. 
Each uint16 element is 2 bytes and each VST1 + // writes 64 bytes, so we can do only tableSize/32 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + // This clear could overrun the first tableSize elements, but it won't + // overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + + // zero the SIMD registers + VEOR V0.B16, V0.B16, V0.B16 + VEOR V1.B16, V1.B16, V1.B16 + VEOR V2.B16, V2.B16, V2.B16 + VEOR V3.B16, V3.B16, V3.B16 + +memclr: + VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15*1), R4 + CMPW R4, R3 + BNE 
inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + MOVD $16, R2 + CMP R2, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) 
+ // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + VLD1 0(R10), [V0.B16] + VST1 [V0.B16], 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15*1), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVD R8, 88(RSP) + CALL ·emitLiteral(SB) + MOVD 88(RSP), R8 + + // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVD 56(RSP), R1 + ADD R1, R8, R8 + +encodeBlockEnd: + MOVD dst_base+0(FP), R3 + SUB R3, R8, R8 + MOVD R8, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go new file mode 100644 index 00000000..107c1e71 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_asm.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Snappy-Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm +// +build amd64 arm64 + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go index dbcae905..296d7f0b 100644 --- a/vendor/github.com/golang/snappy/encode_other.go +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod new file mode 100644 index 00000000..f6406bb2 --- /dev/null +++ b/vendor/github.com/golang/snappy/go.mod @@ -0,0 +1 @@ +module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go deleted file mode 100644 index e4496f92..00000000 --- a/vendor/github.com/golang/snappy/golden_test.go +++ /dev/null @@ -1,1965 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package snappy - -// extendMatchGoldenTestCases is the i and j arguments, and the returned value, -// for every extendMatch call issued when encoding the -// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the -// extendMatch implementation. -// -// It was generated manually by adding some print statements to the (pure Go) -// extendMatch implementation: -// -// func extendMatch(src []byte, i, j int) int { -// i0, j0 := i, j -// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { -// } -// println("{", i0, ",", j0, ",", j, "},") -// return j -// } -// -// and running "go test -test.run=EncodeGoldenInput -tags=noasm". -var extendMatchGoldenTestCases = []struct { - i, j, want int -}{ - {11, 61, 62}, - {80, 81, 82}, - {86, 87, 101}, - {85, 133, 149}, - {152, 153, 162}, - {133, 168, 193}, - {168, 207, 225}, - {81, 255, 275}, - {278, 279, 283}, - {306, 417, 417}, - {373, 428, 430}, - {389, 444, 447}, - {474, 510, 512}, - {465, 533, 533}, - {47, 547, 547}, - {307, 551, 554}, - {420, 582, 587}, - {309, 604, 604}, - {604, 625, 625}, - {538, 629, 629}, - {328, 640, 640}, - {573, 645, 645}, - {319, 657, 657}, - {30, 664, 664}, - {45, 679, 680}, - {621, 684, 684}, - {376, 700, 700}, - {33, 707, 708}, - {601, 733, 733}, - {334, 744, 745}, - {625, 758, 759}, - {382, 763, 763}, - {550, 769, 771}, - {533, 789, 789}, - {804, 813, 813}, - {342, 841, 842}, - {742, 847, 847}, - {74, 852, 852}, - {810, 864, 864}, - {758, 868, 869}, - {714, 883, 883}, - {582, 889, 891}, - {61, 934, 935}, - {894, 942, 942}, - {939, 949, 949}, - {785, 956, 957}, - {886, 978, 978}, - {792, 998, 998}, - {998, 1005, 1005}, - {572, 1032, 1032}, - {698, 1051, 1053}, - {599, 1067, 1069}, - {1056, 1079, 1079}, - {942, 1089, 1090}, - {831, 1094, 1096}, - {1088, 1100, 1103}, - {732, 1113, 1114}, - {1037, 1118, 1118}, - {872, 1128, 1130}, - {1079, 1140, 1142}, - {332, 1162, 1162}, - {207, 1168, 1186}, - {1189, 1190, 1225}, - {105, 1229, 1230}, - {79, 1256, 1257}, - {1190, 1261, 1283}, - 
{255, 1306, 1306}, - {1319, 1339, 1358}, - {364, 1370, 1370}, - {955, 1378, 1380}, - {122, 1403, 1403}, - {1325, 1407, 1419}, - {664, 1423, 1424}, - {941, 1461, 1463}, - {867, 1477, 1478}, - {757, 1488, 1489}, - {1140, 1499, 1499}, - {31, 1506, 1506}, - {1487, 1510, 1512}, - {1089, 1520, 1521}, - {1467, 1525, 1529}, - {1394, 1537, 1537}, - {1499, 1541, 1541}, - {367, 1558, 1558}, - {1475, 1564, 1564}, - {1525, 1568, 1571}, - {1541, 1582, 1583}, - {864, 1587, 1588}, - {704, 1597, 1597}, - {336, 1602, 1602}, - {1383, 1613, 1613}, - {1498, 1617, 1618}, - {1051, 1623, 1625}, - {401, 1643, 1645}, - {1072, 1654, 1655}, - {1067, 1667, 1669}, - {699, 1673, 1674}, - {1587, 1683, 1684}, - {920, 1696, 1696}, - {1505, 1710, 1710}, - {1550, 1723, 1723}, - {996, 1727, 1727}, - {833, 1733, 1734}, - {1638, 1739, 1740}, - {1654, 1744, 1744}, - {753, 1761, 1761}, - {1548, 1773, 1773}, - {1568, 1777, 1780}, - {1683, 1793, 1794}, - {948, 1801, 1801}, - {1666, 1805, 1808}, - {1502, 1814, 1814}, - {1696, 1822, 1822}, - {502, 1836, 1837}, - {917, 1843, 1843}, - {1733, 1854, 1855}, - {970, 1859, 1859}, - {310, 1863, 1863}, - {657, 1872, 1872}, - {1005, 1876, 1876}, - {1662, 1880, 1880}, - {904, 1892, 1892}, - {1427, 1910, 1910}, - {1772, 1929, 1930}, - {1822, 1937, 1940}, - {1858, 1949, 1950}, - {1602, 1956, 1956}, - {1150, 1962, 1962}, - {1504, 1966, 1967}, - {51, 1971, 1971}, - {1605, 1979, 1979}, - {1458, 1983, 1988}, - {1536, 2001, 2006}, - {1373, 2014, 2018}, - {1494, 2025, 2025}, - {1667, 2029, 2031}, - {1592, 2035, 2035}, - {330, 2045, 2045}, - {1376, 2053, 2053}, - {1991, 2058, 2059}, - {1635, 2065, 2065}, - {1992, 2073, 2074}, - {2014, 2080, 2081}, - {1546, 2085, 2087}, - {59, 2099, 2099}, - {1996, 2106, 2106}, - {1836, 2110, 2110}, - {2068, 2114, 2114}, - {1338, 2122, 2122}, - {1562, 2128, 2130}, - {1934, 2134, 2134}, - {2114, 2141, 2142}, - {977, 2149, 2150}, - {956, 2154, 2155}, - {1407, 2162, 2162}, - {1773, 2166, 2166}, - {883, 2171, 2171}, - {623, 2175, 2178}, - {1520, 
2191, 2192}, - {1162, 2200, 2200}, - {912, 2204, 2204}, - {733, 2208, 2208}, - {1777, 2212, 2215}, - {1532, 2219, 2219}, - {718, 2223, 2225}, - {2069, 2229, 2229}, - {2207, 2245, 2246}, - {1139, 2264, 2264}, - {677, 2274, 2274}, - {2099, 2279, 2279}, - {1863, 2283, 2283}, - {1966, 2305, 2306}, - {2279, 2313, 2313}, - {1628, 2319, 2319}, - {755, 2329, 2329}, - {1461, 2334, 2334}, - {2117, 2340, 2340}, - {2313, 2349, 2349}, - {1859, 2353, 2353}, - {1048, 2362, 2362}, - {895, 2366, 2366}, - {2278, 2373, 2373}, - {1884, 2377, 2377}, - {1402, 2387, 2392}, - {700, 2398, 2398}, - {1971, 2402, 2402}, - {2009, 2419, 2419}, - {1441, 2426, 2428}, - {2208, 2432, 2432}, - {2038, 2436, 2436}, - {932, 2443, 2443}, - {1759, 2447, 2448}, - {744, 2452, 2452}, - {1875, 2458, 2458}, - {2405, 2468, 2468}, - {1596, 2472, 2473}, - {1953, 2480, 2482}, - {736, 2487, 2487}, - {1913, 2493, 2493}, - {774, 2497, 2497}, - {1484, 2506, 2508}, - {2432, 2512, 2512}, - {752, 2519, 2519}, - {2497, 2523, 2523}, - {2409, 2528, 2529}, - {2122, 2533, 2533}, - {2396, 2537, 2538}, - {2410, 2547, 2548}, - {1093, 2555, 2560}, - {551, 2564, 2565}, - {2268, 2569, 2569}, - {1362, 2580, 2580}, - {1916, 2584, 2585}, - {994, 2589, 2590}, - {1979, 2596, 2596}, - {1041, 2602, 2602}, - {2104, 2614, 2616}, - {2609, 2621, 2628}, - {2329, 2638, 2638}, - {2211, 2657, 2658}, - {2638, 2662, 2667}, - {2578, 2676, 2679}, - {2153, 2685, 2686}, - {2608, 2696, 2697}, - {598, 2712, 2712}, - {2620, 2719, 2720}, - {1888, 2724, 2728}, - {2709, 2732, 2732}, - {1365, 2739, 2739}, - {784, 2747, 2748}, - {424, 2753, 2753}, - {2204, 2759, 2759}, - {812, 2768, 2769}, - {2455, 2773, 2773}, - {1722, 2781, 2781}, - {1917, 2792, 2792}, - {2705, 2799, 2799}, - {2685, 2806, 2807}, - {2742, 2811, 2811}, - {1370, 2818, 2818}, - {2641, 2830, 2830}, - {2512, 2837, 2837}, - {2457, 2841, 2841}, - {2756, 2845, 2845}, - {2719, 2855, 2855}, - {1423, 2859, 2859}, - {2849, 2863, 2865}, - {1474, 2871, 2871}, - {1161, 2875, 2876}, - {2282, 2880, 2881}, - 
{2746, 2888, 2888}, - {1783, 2893, 2893}, - {2401, 2899, 2900}, - {2632, 2920, 2923}, - {2422, 2928, 2930}, - {2715, 2939, 2939}, - {2162, 2943, 2943}, - {2859, 2947, 2947}, - {1910, 2951, 2951}, - {1431, 2955, 2956}, - {1439, 2964, 2964}, - {2501, 2968, 2969}, - {2029, 2973, 2976}, - {689, 2983, 2984}, - {1658, 2988, 2988}, - {1031, 2996, 2996}, - {2149, 3001, 3002}, - {25, 3009, 3013}, - {2964, 3023, 3023}, - {953, 3027, 3028}, - {2359, 3036, 3036}, - {3023, 3049, 3049}, - {2880, 3055, 3056}, - {2973, 3076, 3077}, - {2874, 3090, 3090}, - {2871, 3094, 3094}, - {2532, 3100, 3100}, - {2938, 3107, 3108}, - {350, 3115, 3115}, - {2196, 3119, 3121}, - {1133, 3127, 3129}, - {1797, 3134, 3150}, - {3032, 3158, 3158}, - {3016, 3172, 3172}, - {2533, 3179, 3179}, - {3055, 3187, 3188}, - {1384, 3192, 3193}, - {2799, 3199, 3199}, - {2126, 3203, 3207}, - {2334, 3215, 3215}, - {2105, 3220, 3221}, - {3199, 3229, 3229}, - {2891, 3233, 3233}, - {855, 3240, 3240}, - {1852, 3253, 3256}, - {2140, 3263, 3263}, - {1682, 3268, 3270}, - {3243, 3274, 3274}, - {924, 3279, 3279}, - {2212, 3283, 3283}, - {2596, 3287, 3287}, - {2999, 3291, 3291}, - {2353, 3295, 3295}, - {2480, 3302, 3304}, - {1959, 3308, 3311}, - {3000, 3318, 3318}, - {845, 3330, 3330}, - {2283, 3334, 3334}, - {2519, 3342, 3342}, - {3325, 3346, 3348}, - {2397, 3353, 3354}, - {2763, 3358, 3358}, - {3198, 3363, 3364}, - {3211, 3368, 3372}, - {2950, 3376, 3377}, - {3245, 3388, 3391}, - {2264, 3398, 3398}, - {795, 3403, 3403}, - {3287, 3407, 3407}, - {3358, 3411, 3411}, - {3317, 3415, 3415}, - {3232, 3431, 3431}, - {2128, 3435, 3437}, - {3236, 3441, 3441}, - {3398, 3445, 3446}, - {2814, 3450, 3450}, - {3394, 3466, 3466}, - {2425, 3470, 3470}, - {3330, 3476, 3476}, - {1612, 3480, 3480}, - {1004, 3485, 3486}, - {2732, 3490, 3490}, - {1117, 3494, 3495}, - {629, 3501, 3501}, - {3087, 3514, 3514}, - {684, 3518, 3518}, - {3489, 3522, 3524}, - {1760, 3529, 3529}, - {617, 3537, 3537}, - {3431, 3541, 3541}, - {997, 3547, 3547}, - {882, 
3552, 3553}, - {2419, 3558, 3558}, - {610, 3562, 3563}, - {1903, 3567, 3569}, - {3005, 3575, 3575}, - {3076, 3585, 3586}, - {3541, 3590, 3590}, - {3490, 3594, 3594}, - {1899, 3599, 3599}, - {3545, 3606, 3606}, - {3290, 3614, 3615}, - {2056, 3619, 3620}, - {3556, 3625, 3625}, - {3294, 3632, 3633}, - {637, 3643, 3644}, - {3609, 3648, 3650}, - {3175, 3658, 3658}, - {3498, 3665, 3665}, - {1597, 3669, 3669}, - {1983, 3673, 3673}, - {3215, 3682, 3682}, - {3544, 3689, 3689}, - {3694, 3698, 3698}, - {3228, 3715, 3716}, - {2594, 3720, 3722}, - {3573, 3726, 3726}, - {2479, 3732, 3735}, - {3191, 3741, 3742}, - {1113, 3746, 3747}, - {2844, 3751, 3751}, - {3445, 3756, 3757}, - {3755, 3766, 3766}, - {3421, 3775, 3780}, - {3593, 3784, 3786}, - {3263, 3796, 3796}, - {3469, 3806, 3806}, - {2602, 3815, 3815}, - {723, 3819, 3821}, - {1608, 3826, 3826}, - {3334, 3830, 3830}, - {2198, 3835, 3835}, - {2635, 3840, 3840}, - {3702, 3852, 3853}, - {3406, 3858, 3859}, - {3681, 3867, 3870}, - {3407, 3880, 3880}, - {340, 3889, 3889}, - {3772, 3893, 3893}, - {593, 3897, 3897}, - {2563, 3914, 3916}, - {2981, 3929, 3929}, - {1835, 3933, 3934}, - {3906, 3951, 3951}, - {1459, 3958, 3958}, - {3889, 3974, 3974}, - {2188, 3982, 3982}, - {3220, 3986, 3987}, - {3585, 3991, 3993}, - {3712, 3997, 4001}, - {2805, 4007, 4007}, - {1879, 4012, 4013}, - {3618, 4018, 4018}, - {1145, 4031, 4032}, - {3901, 4037, 4037}, - {2772, 4046, 4047}, - {2802, 4053, 4054}, - {3299, 4058, 4058}, - {3725, 4066, 4066}, - {2271, 4070, 4070}, - {385, 4075, 4076}, - {3624, 4089, 4090}, - {3745, 4096, 4098}, - {1563, 4102, 4102}, - {4045, 4106, 4111}, - {3696, 4115, 4119}, - {3376, 4125, 4126}, - {1880, 4130, 4130}, - {2048, 4140, 4141}, - {2724, 4149, 4149}, - {1767, 4156, 4156}, - {2601, 4164, 4164}, - {2757, 4168, 4168}, - {3974, 4172, 4172}, - {3914, 4178, 4178}, - {516, 4185, 4185}, - {1032, 4189, 4190}, - {3462, 4197, 4198}, - {3805, 4202, 4203}, - {3910, 4207, 4212}, - {3075, 4221, 4221}, - {3756, 4225, 4226}, - {1872, 
4236, 4237}, - {3844, 4241, 4241}, - {3991, 4245, 4249}, - {2203, 4258, 4258}, - {3903, 4267, 4268}, - {705, 4272, 4272}, - {1896, 4276, 4276}, - {1955, 4285, 4288}, - {3746, 4302, 4303}, - {2672, 4311, 4311}, - {3969, 4317, 4317}, - {3883, 4322, 4322}, - {1920, 4339, 4340}, - {3527, 4344, 4346}, - {1160, 4358, 4358}, - {3648, 4364, 4366}, - {2711, 4387, 4387}, - {3619, 4391, 4392}, - {1944, 4396, 4396}, - {4369, 4400, 4400}, - {2736, 4404, 4407}, - {2546, 4411, 4412}, - {4390, 4422, 4422}, - {3610, 4426, 4427}, - {4058, 4431, 4431}, - {4374, 4435, 4435}, - {3463, 4445, 4446}, - {1813, 4452, 4452}, - {3669, 4456, 4456}, - {3830, 4460, 4460}, - {421, 4464, 4465}, - {1719, 4471, 4471}, - {3880, 4475, 4475}, - {1834, 4485, 4487}, - {3590, 4491, 4491}, - {442, 4496, 4497}, - {4435, 4501, 4501}, - {3814, 4509, 4509}, - {987, 4513, 4513}, - {4494, 4518, 4521}, - {3218, 4526, 4529}, - {4221, 4537, 4537}, - {2778, 4543, 4545}, - {4422, 4552, 4552}, - {4031, 4558, 4559}, - {4178, 4563, 4563}, - {3726, 4567, 4574}, - {4027, 4578, 4578}, - {4339, 4585, 4587}, - {3796, 4592, 4595}, - {543, 4600, 4613}, - {2855, 4620, 4621}, - {2795, 4627, 4627}, - {3440, 4631, 4632}, - {4279, 4636, 4639}, - {4245, 4643, 4645}, - {4516, 4649, 4650}, - {3133, 4654, 4654}, - {4042, 4658, 4659}, - {3422, 4663, 4663}, - {4046, 4667, 4668}, - {4267, 4672, 4672}, - {4004, 4676, 4677}, - {2490, 4682, 4682}, - {2451, 4697, 4697}, - {3027, 4705, 4705}, - {4028, 4717, 4717}, - {4460, 4721, 4721}, - {2471, 4725, 4727}, - {3090, 4735, 4735}, - {3192, 4739, 4740}, - {3835, 4760, 4760}, - {4540, 4764, 4764}, - {4007, 4772, 4774}, - {619, 4784, 4784}, - {3561, 4789, 4791}, - {3367, 4805, 4805}, - {4490, 4810, 4811}, - {2402, 4815, 4815}, - {3352, 4819, 4822}, - {2773, 4828, 4828}, - {4552, 4832, 4832}, - {2522, 4840, 4841}, - {316, 4847, 4852}, - {4715, 4858, 4858}, - {2959, 4862, 4862}, - {4858, 4868, 4869}, - {2134, 4873, 4873}, - {578, 4878, 4878}, - {4189, 4889, 4890}, - {2229, 4894, 4894}, - {4501, 4898, 
4898}, - {2297, 4903, 4903}, - {2933, 4909, 4909}, - {3008, 4913, 4913}, - {3153, 4917, 4917}, - {4819, 4921, 4921}, - {4921, 4932, 4933}, - {4920, 4944, 4945}, - {4814, 4954, 4955}, - {576, 4966, 4966}, - {1854, 4970, 4971}, - {1374, 4975, 4976}, - {3307, 4980, 4980}, - {974, 4984, 4988}, - {4721, 4992, 4992}, - {4898, 4996, 4996}, - {4475, 5006, 5006}, - {3819, 5012, 5012}, - {1948, 5019, 5021}, - {4954, 5027, 5029}, - {3740, 5038, 5040}, - {4763, 5044, 5045}, - {1936, 5051, 5051}, - {4844, 5055, 5060}, - {4215, 5069, 5072}, - {1146, 5076, 5076}, - {3845, 5082, 5082}, - {4865, 5090, 5090}, - {4624, 5094, 5094}, - {4815, 5098, 5098}, - {5006, 5105, 5105}, - {4980, 5109, 5109}, - {4795, 5113, 5115}, - {5043, 5119, 5121}, - {4782, 5129, 5129}, - {3826, 5139, 5139}, - {3876, 5156, 5156}, - {3111, 5167, 5171}, - {1470, 5177, 5177}, - {4431, 5181, 5181}, - {546, 5189, 5189}, - {4225, 5193, 5193}, - {1672, 5199, 5201}, - {4207, 5205, 5209}, - {4220, 5216, 5217}, - {4658, 5224, 5225}, - {3295, 5235, 5235}, - {2436, 5239, 5239}, - {2349, 5246, 5246}, - {2175, 5250, 5250}, - {5180, 5257, 5258}, - {3161, 5263, 5263}, - {5105, 5272, 5272}, - {3552, 5282, 5282}, - {4944, 5299, 5300}, - {4130, 5312, 5313}, - {902, 5323, 5323}, - {913, 5327, 5327}, - {2987, 5333, 5334}, - {5150, 5344, 5344}, - {5249, 5348, 5348}, - {1965, 5358, 5359}, - {5330, 5364, 5364}, - {2012, 5373, 5377}, - {712, 5384, 5386}, - {5235, 5390, 5390}, - {5044, 5398, 5399}, - {564, 5406, 5406}, - {39, 5410, 5410}, - {4642, 5422, 5425}, - {4421, 5437, 5438}, - {2347, 5449, 5449}, - {5333, 5453, 5454}, - {4136, 5458, 5459}, - {3793, 5468, 5468}, - {2243, 5480, 5480}, - {4889, 5492, 5493}, - {4295, 5504, 5504}, - {2785, 5511, 5511}, - {2377, 5518, 5518}, - {3662, 5525, 5525}, - {5097, 5529, 5530}, - {4781, 5537, 5538}, - {4697, 5547, 5548}, - {436, 5552, 5553}, - {5542, 5558, 5558}, - {3692, 5562, 5562}, - {2696, 5568, 5569}, - {4620, 5578, 5578}, - {2898, 5590, 5590}, - {5557, 5596, 5618}, - {2797, 5623, 5625}, 
- {2792, 5629, 5629}, - {5243, 5633, 5633}, - {5348, 5637, 5637}, - {5547, 5643, 5643}, - {4296, 5654, 5655}, - {5568, 5662, 5662}, - {3001, 5670, 5671}, - {3794, 5679, 5679}, - {4006, 5685, 5686}, - {4969, 5690, 5692}, - {687, 5704, 5704}, - {4563, 5708, 5708}, - {1723, 5738, 5738}, - {649, 5742, 5742}, - {5163, 5748, 5755}, - {3907, 5759, 5759}, - {3074, 5764, 5764}, - {5326, 5771, 5771}, - {2951, 5776, 5776}, - {5181, 5780, 5780}, - {2614, 5785, 5788}, - {4709, 5794, 5794}, - {2784, 5799, 5799}, - {5518, 5803, 5803}, - {4155, 5812, 5815}, - {921, 5819, 5819}, - {5224, 5823, 5824}, - {2853, 5830, 5836}, - {5776, 5840, 5840}, - {2955, 5844, 5845}, - {5745, 5853, 5853}, - {3291, 5857, 5857}, - {2988, 5861, 5861}, - {2647, 5865, 5865}, - {5398, 5869, 5870}, - {1085, 5874, 5875}, - {4906, 5881, 5881}, - {802, 5886, 5886}, - {5119, 5890, 5893}, - {5802, 5899, 5900}, - {3415, 5904, 5904}, - {5629, 5908, 5908}, - {3714, 5912, 5914}, - {5558, 5921, 5921}, - {2710, 5927, 5928}, - {1094, 5932, 5934}, - {2653, 5940, 5941}, - {4735, 5954, 5954}, - {5861, 5958, 5958}, - {1040, 5971, 5971}, - {5514, 5977, 5977}, - {5048, 5981, 5982}, - {5953, 5992, 5993}, - {3751, 5997, 5997}, - {4991, 6001, 6002}, - {5885, 6006, 6007}, - {5529, 6011, 6012}, - {4974, 6019, 6020}, - {5857, 6024, 6024}, - {3483, 6032, 6032}, - {3594, 6036, 6036}, - {1997, 6040, 6040}, - {5997, 6044, 6047}, - {5197, 6051, 6051}, - {1764, 6055, 6055}, - {6050, 6059, 6059}, - {5239, 6063, 6063}, - {5049, 6067, 6067}, - {5957, 6073, 6074}, - {1022, 6078, 6078}, - {3414, 6083, 6084}, - {3809, 6090, 6090}, - {4562, 6095, 6096}, - {5878, 6104, 6104}, - {594, 6108, 6109}, - {3353, 6115, 6116}, - {4992, 6120, 6121}, - {2424, 6125, 6125}, - {4484, 6130, 6130}, - {3900, 6134, 6135}, - {5793, 6139, 6141}, - {3562, 6145, 6145}, - {1438, 6152, 6153}, - {6058, 6157, 6158}, - {4411, 6162, 6163}, - {4590, 6167, 6171}, - {4748, 6175, 6175}, - {5517, 6183, 6184}, - {6095, 6191, 6192}, - {1471, 6203, 6203}, - {2643, 6209, 6210}, - 
{450, 6220, 6220}, - {5266, 6226, 6226}, - {2576, 6233, 6233}, - {2607, 6239, 6240}, - {5164, 6244, 6251}, - {6054, 6255, 6255}, - {1789, 6260, 6261}, - {5250, 6265, 6265}, - {6062, 6273, 6278}, - {5990, 6282, 6282}, - {3283, 6286, 6286}, - {5436, 6290, 6290}, - {6059, 6294, 6294}, - {5668, 6298, 6300}, - {3072, 6324, 6329}, - {3132, 6338, 6339}, - {3246, 6343, 6344}, - {28, 6348, 6349}, - {1503, 6353, 6355}, - {6067, 6359, 6359}, - {3384, 6364, 6364}, - {545, 6375, 6376}, - {5803, 6380, 6380}, - {5522, 6384, 6385}, - {5908, 6389, 6389}, - {2796, 6393, 6396}, - {4831, 6403, 6404}, - {6388, 6412, 6412}, - {6005, 6417, 6420}, - {4450, 6430, 6430}, - {4050, 6435, 6435}, - {5372, 6441, 6441}, - {4378, 6447, 6447}, - {6199, 6452, 6452}, - {3026, 6456, 6456}, - {2642, 6460, 6462}, - {6392, 6470, 6470}, - {6459, 6474, 6474}, - {2829, 6487, 6488}, - {2942, 6499, 6504}, - {5069, 6508, 6511}, - {5341, 6515, 6516}, - {5853, 6521, 6525}, - {6104, 6531, 6531}, - {5759, 6535, 6538}, - {4672, 6542, 6543}, - {2443, 6550, 6550}, - {5109, 6554, 6554}, - {6494, 6558, 6560}, - {6006, 6570, 6572}, - {6424, 6576, 6580}, - {4693, 6591, 6592}, - {6439, 6596, 6597}, - {3179, 6601, 6601}, - {5299, 6606, 6607}, - {4148, 6612, 6613}, - {3774, 6617, 6617}, - {3537, 6623, 6624}, - {4975, 6628, 6629}, - {3848, 6636, 6636}, - {856, 6640, 6640}, - {5724, 6645, 6645}, - {6632, 6651, 6651}, - {4630, 6656, 6658}, - {1440, 6662, 6662}, - {4281, 6666, 6667}, - {4302, 6671, 6672}, - {2589, 6676, 6677}, - {5647, 6681, 6687}, - {6082, 6691, 6693}, - {6144, 6698, 6698}, - {6103, 6709, 6710}, - {3710, 6714, 6714}, - {4253, 6718, 6721}, - {2467, 6730, 6730}, - {4778, 6734, 6734}, - {6528, 6738, 6738}, - {4358, 6747, 6747}, - {5889, 6753, 6753}, - {5193, 6757, 6757}, - {5797, 6761, 6761}, - {3858, 6765, 6766}, - {5951, 6776, 6776}, - {6487, 6781, 6782}, - {3282, 6786, 6787}, - {4667, 6797, 6799}, - {1927, 6803, 6806}, - {6583, 6810, 6810}, - {4937, 6814, 6814}, - {6099, 6824, 6824}, - {4415, 6835, 6836}, - 
{6332, 6840, 6841}, - {5160, 6850, 6850}, - {4764, 6854, 6854}, - {6814, 6858, 6859}, - {3018, 6864, 6864}, - {6293, 6868, 6869}, - {6359, 6877, 6877}, - {3047, 6884, 6886}, - {5262, 6890, 6891}, - {5471, 6900, 6900}, - {3268, 6910, 6912}, - {1047, 6916, 6916}, - {5904, 6923, 6923}, - {5798, 6933, 6938}, - {4149, 6942, 6942}, - {1821, 6946, 6946}, - {3599, 6952, 6952}, - {6470, 6957, 6957}, - {5562, 6961, 6961}, - {6268, 6965, 6967}, - {6389, 6971, 6971}, - {6596, 6975, 6976}, - {6553, 6980, 6981}, - {6576, 6985, 6989}, - {1375, 6993, 6993}, - {652, 6998, 6998}, - {4876, 7002, 7003}, - {5768, 7011, 7013}, - {3973, 7017, 7017}, - {6802, 7025, 7025}, - {6955, 7034, 7036}, - {6974, 7040, 7040}, - {5944, 7044, 7044}, - {6992, 7048, 7054}, - {6872, 7059, 7059}, - {2943, 7063, 7063}, - {6923, 7067, 7067}, - {5094, 7071, 7071}, - {4873, 7075, 7075}, - {5819, 7079, 7079}, - {5945, 7085, 7085}, - {1540, 7090, 7091}, - {2090, 7095, 7095}, - {5024, 7104, 7105}, - {6900, 7109, 7109}, - {6024, 7113, 7114}, - {6000, 7118, 7120}, - {2187, 7124, 7125}, - {6760, 7129, 7130}, - {5898, 7134, 7136}, - {7032, 7144, 7144}, - {4271, 7148, 7148}, - {3706, 7152, 7152}, - {6970, 7156, 7157}, - {7088, 7161, 7163}, - {2718, 7168, 7169}, - {5674, 7175, 7175}, - {4631, 7182, 7182}, - {7070, 7188, 7189}, - {6220, 7196, 7196}, - {3458, 7201, 7202}, - {2041, 7211, 7212}, - {1454, 7216, 7216}, - {5199, 7225, 7227}, - {3529, 7234, 7234}, - {6890, 7238, 7238}, - {3815, 7242, 7243}, - {5490, 7250, 7253}, - {6554, 7257, 7263}, - {5890, 7267, 7269}, - {6877, 7273, 7273}, - {4877, 7277, 7277}, - {2502, 7285, 7285}, - {1483, 7289, 7295}, - {7210, 7304, 7308}, - {6845, 7313, 7316}, - {7219, 7320, 7320}, - {7001, 7325, 7329}, - {6853, 7333, 7334}, - {6120, 7338, 7338}, - {6606, 7342, 7343}, - {7020, 7348, 7350}, - {3509, 7354, 7354}, - {7133, 7359, 7363}, - {3434, 7371, 7374}, - {2787, 7384, 7384}, - {7044, 7388, 7388}, - {6960, 7394, 7395}, - {6676, 7399, 7400}, - {7161, 7404, 7404}, - {7285, 7417, 7418}, 
- {4558, 7425, 7426}, - {4828, 7430, 7430}, - {6063, 7436, 7436}, - {3597, 7442, 7442}, - {914, 7446, 7446}, - {7320, 7452, 7454}, - {7267, 7458, 7460}, - {5076, 7464, 7464}, - {7430, 7468, 7469}, - {6273, 7473, 7474}, - {7440, 7478, 7487}, - {7348, 7491, 7494}, - {1021, 7510, 7510}, - {7473, 7515, 7515}, - {2823, 7519, 7519}, - {6264, 7527, 7527}, - {7302, 7531, 7531}, - {7089, 7535, 7535}, - {7342, 7540, 7541}, - {3688, 7547, 7551}, - {3054, 7558, 7560}, - {4177, 7566, 7567}, - {6691, 7574, 7575}, - {7156, 7585, 7586}, - {7147, 7590, 7592}, - {7407, 7598, 7598}, - {7403, 7602, 7603}, - {6868, 7607, 7607}, - {6636, 7611, 7611}, - {4805, 7617, 7617}, - {5779, 7623, 7623}, - {7063, 7627, 7627}, - {5079, 7632, 7632}, - {7377, 7637, 7637}, - {7337, 7641, 7642}, - {6738, 7655, 7655}, - {7338, 7659, 7659}, - {6541, 7669, 7671}, - {595, 7675, 7675}, - {7658, 7679, 7680}, - {7647, 7685, 7686}, - {2477, 7690, 7690}, - {5823, 7694, 7694}, - {4156, 7699, 7699}, - {5931, 7703, 7706}, - {6854, 7712, 7712}, - {4931, 7718, 7718}, - {6979, 7722, 7722}, - {5085, 7727, 7727}, - {6965, 7732, 7732}, - {7201, 7736, 7737}, - {3639, 7741, 7743}, - {7534, 7749, 7749}, - {4292, 7753, 7753}, - {3427, 7759, 7763}, - {7273, 7767, 7767}, - {940, 7778, 7778}, - {4838, 7782, 7785}, - {4216, 7790, 7792}, - {922, 7800, 7801}, - {7256, 7810, 7811}, - {7789, 7815, 7819}, - {7225, 7823, 7825}, - {7531, 7829, 7829}, - {6997, 7833, 7833}, - {7757, 7837, 7838}, - {4129, 7842, 7842}, - {7333, 7848, 7849}, - {6776, 7855, 7855}, - {7527, 7859, 7859}, - {4370, 7863, 7863}, - {4512, 7868, 7868}, - {5679, 7880, 7880}, - {3162, 7884, 7885}, - {3933, 7892, 7894}, - {7804, 7899, 7902}, - {6363, 7906, 7907}, - {7848, 7911, 7912}, - {5584, 7917, 7921}, - {874, 7926, 7926}, - {3342, 7930, 7930}, - {4507, 7935, 7937}, - {3672, 7943, 7944}, - {7911, 7948, 7949}, - {6402, 7956, 7956}, - {7940, 7960, 7960}, - {7113, 7964, 7964}, - {1073, 7968, 7968}, - {7740, 7974, 7974}, - {7601, 7978, 7982}, - {6797, 7987, 7988}, - 
{3528, 7994, 7995}, - {5483, 7999, 7999}, - {5717, 8011, 8011}, - {5480, 8017, 8017}, - {7770, 8023, 8030}, - {2452, 8034, 8034}, - {5282, 8047, 8047}, - {7967, 8051, 8051}, - {1128, 8058, 8066}, - {6348, 8070, 8070}, - {8055, 8077, 8077}, - {7925, 8081, 8086}, - {6810, 8090, 8090}, - {5051, 8101, 8101}, - {4696, 8109, 8110}, - {5129, 8119, 8119}, - {4449, 8123, 8123}, - {7222, 8127, 8127}, - {4649, 8131, 8134}, - {7994, 8138, 8138}, - {5954, 8148, 8148}, - {475, 8152, 8153}, - {7906, 8157, 8157}, - {7458, 8164, 8166}, - {7632, 8171, 8173}, - {3874, 8177, 8183}, - {4391, 8187, 8187}, - {561, 8191, 8191}, - {2417, 8195, 8195}, - {2357, 8204, 8204}, - {2269, 8216, 8218}, - {3968, 8222, 8222}, - {2200, 8226, 8227}, - {3453, 8247, 8247}, - {2439, 8251, 8252}, - {7175, 8257, 8257}, - {976, 8262, 8264}, - {4953, 8273, 8273}, - {4219, 8278, 8278}, - {6, 8285, 8291}, - {5703, 8295, 8296}, - {5272, 8300, 8300}, - {8037, 8304, 8304}, - {8186, 8314, 8314}, - {8304, 8318, 8318}, - {8051, 8326, 8326}, - {8318, 8330, 8330}, - {2671, 8334, 8335}, - {2662, 8339, 8339}, - {8081, 8349, 8350}, - {3328, 8356, 8356}, - {2879, 8360, 8362}, - {8050, 8370, 8371}, - {8330, 8375, 8376}, - {8375, 8386, 8386}, - {4961, 8390, 8390}, - {1017, 8403, 8405}, - {3533, 8416, 8416}, - {4555, 8422, 8422}, - {6445, 8426, 8426}, - {8169, 8432, 8432}, - {990, 8436, 8436}, - {4102, 8440, 8440}, - {7398, 8444, 8446}, - {3480, 8450, 8450}, - {6324, 8462, 8462}, - {7948, 8466, 8467}, - {5950, 8471, 8471}, - {5189, 8476, 8476}, - {4026, 8490, 8490}, - {8374, 8494, 8495}, - {4682, 8501, 8501}, - {7387, 8506, 8506}, - {8164, 8510, 8515}, - {4079, 8524, 8524}, - {8360, 8529, 8531}, - {7446, 8540, 8543}, - {7971, 8547, 8548}, - {4311, 8552, 8552}, - {5204, 8556, 8557}, - {7968, 8562, 8562}, - {7847, 8571, 8573}, - {8547, 8577, 8577}, - {5320, 8581, 8581}, - {8556, 8585, 8586}, - {8504, 8590, 8590}, - {7669, 8602, 8604}, - {5874, 8608, 8609}, - {5828, 8613, 8613}, - {7998, 8617, 8617}, - {8519, 8625, 8625}, - 
{7250, 8637, 8637}, - {426, 8641, 8641}, - {8436, 8645, 8645}, - {5986, 8649, 8656}, - {8157, 8660, 8660}, - {7182, 8665, 8665}, - {8421, 8675, 8675}, - {8509, 8681, 8681}, - {5137, 8688, 8689}, - {8625, 8694, 8695}, - {5228, 8701, 8702}, - {6661, 8714, 8714}, - {1010, 8719, 8719}, - {6648, 8723, 8723}, - {3500, 8728, 8728}, - {2442, 8735, 8735}, - {8494, 8740, 8741}, - {8171, 8753, 8755}, - {7242, 8763, 8764}, - {4739, 8768, 8769}, - {7079, 8773, 8773}, - {8386, 8777, 8777}, - {8624, 8781, 8787}, - {661, 8791, 8794}, - {8631, 8801, 8801}, - {7753, 8805, 8805}, - {4783, 8809, 8810}, - {1673, 8814, 8815}, - {6623, 8819, 8819}, - {4404, 8823, 8823}, - {8089, 8827, 8828}, - {8773, 8832, 8832}, - {5394, 8836, 8836}, - {6231, 8841, 8843}, - {1015, 8852, 8853}, - {6873, 8857, 8857}, - {6289, 8865, 8865}, - {8577, 8869, 8869}, - {8114, 8873, 8875}, - {8534, 8883, 8883}, - {3007, 8887, 8888}, - {8827, 8892, 8893}, - {4788, 8897, 8900}, - {5698, 8906, 8907}, - {7690, 8911, 8911}, - {6643, 8919, 8919}, - {7206, 8923, 8924}, - {7866, 8929, 8931}, - {8880, 8942, 8942}, - {8630, 8951, 8952}, - {6027, 8958, 8958}, - {7749, 8966, 8967}, - {4932, 8972, 8973}, - {8892, 8980, 8981}, - {634, 9003, 9003}, - {8109, 9007, 9008}, - {8777, 9012, 9012}, - {3981, 9016, 9017}, - {5723, 9025, 9025}, - {7662, 9034, 9038}, - {8955, 9042, 9042}, - {8070, 9060, 9062}, - {8910, 9066, 9066}, - {5363, 9070, 9071}, - {7699, 9075, 9076}, - {8991, 9081, 9081}, - {6850, 9085, 9085}, - {5811, 9092, 9094}, - {9079, 9098, 9102}, - {6456, 9106, 9106}, - {2259, 9111, 9111}, - {4752, 9116, 9116}, - {9060, 9120, 9123}, - {8090, 9127, 9127}, - {5305, 9131, 9132}, - {8623, 9137, 9137}, - {7417, 9141, 9141}, - {6564, 9148, 9149}, - {9126, 9157, 9158}, - {4285, 9169, 9170}, - {8698, 9174, 9174}, - {8869, 9178, 9178}, - {2572, 9182, 9183}, - {6482, 9188, 9190}, - {9181, 9201, 9201}, - {2968, 9208, 9209}, - {2506, 9213, 9215}, - {9127, 9219, 9219}, - {7910, 9225, 9227}, - {5422, 9235, 9239}, - {8813, 9244, 9246}, - 
{9178, 9250, 9250}, - {8748, 9255, 9255}, - {7354, 9265, 9265}, - {7767, 9269, 9269}, - {7710, 9281, 9283}, - {8826, 9288, 9290}, - {861, 9295, 9295}, - {4482, 9301, 9301}, - {9264, 9305, 9306}, - {8805, 9310, 9310}, - {4995, 9314, 9314}, - {6730, 9318, 9318}, - {7457, 9328, 9328}, - {2547, 9335, 9336}, - {6298, 9340, 9343}, - {9305, 9353, 9354}, - {9269, 9358, 9358}, - {6338, 9370, 9370}, - {7289, 9376, 9379}, - {5780, 9383, 9383}, - {7607, 9387, 9387}, - {2065, 9392, 9392}, - {7238, 9396, 9396}, - {8856, 9400, 9400}, - {8069, 9412, 9413}, - {611, 9420, 9420}, - {7071, 9424, 9424}, - {3089, 9430, 9431}, - {7117, 9435, 9438}, - {1976, 9445, 9445}, - {6640, 9449, 9449}, - {5488, 9453, 9453}, - {8739, 9457, 9459}, - {5958, 9466, 9466}, - {7985, 9470, 9470}, - {8735, 9475, 9475}, - {5009, 9479, 9479}, - {8073, 9483, 9484}, - {2328, 9490, 9491}, - {9250, 9495, 9495}, - {4043, 9502, 9502}, - {7712, 9506, 9506}, - {9012, 9510, 9510}, - {9028, 9514, 9515}, - {2190, 9521, 9524}, - {9029, 9528, 9528}, - {9519, 9532, 9532}, - {9495, 9536, 9536}, - {8527, 9540, 9540}, - {2137, 9550, 9550}, - {8419, 9557, 9557}, - {9383, 9561, 9562}, - {8970, 9575, 9578}, - {8911, 9582, 9582}, - {7828, 9595, 9596}, - {6180, 9600, 9600}, - {8738, 9604, 9607}, - {7540, 9611, 9612}, - {9599, 9616, 9618}, - {9187, 9623, 9623}, - {9294, 9628, 9629}, - {4536, 9639, 9639}, - {3867, 9643, 9643}, - {6305, 9648, 9648}, - {1617, 9654, 9657}, - {5762, 9666, 9666}, - {8314, 9670, 9670}, - {9666, 9674, 9675}, - {9506, 9679, 9679}, - {9669, 9685, 9686}, - {9683, 9690, 9690}, - {8763, 9697, 9698}, - {7468, 9702, 9702}, - {460, 9707, 9707}, - {3115, 9712, 9712}, - {9424, 9716, 9717}, - {7359, 9721, 9724}, - {7547, 9728, 9729}, - {7151, 9733, 9738}, - {7627, 9742, 9742}, - {2822, 9747, 9747}, - {8247, 9751, 9753}, - {9550, 9758, 9758}, - {7585, 9762, 9763}, - {1002, 9767, 9767}, - {7168, 9772, 9773}, - {6941, 9777, 9780}, - {9728, 9784, 9786}, - {9770, 9792, 9796}, - {6411, 9801, 9802}, - {3689, 9806, 9808}, - 
{9575, 9814, 9816}, - {7025, 9820, 9821}, - {2776, 9826, 9826}, - {9806, 9830, 9830}, - {9820, 9834, 9835}, - {9800, 9839, 9847}, - {9834, 9851, 9852}, - {9829, 9856, 9862}, - {1400, 9866, 9866}, - {3197, 9870, 9871}, - {9851, 9875, 9876}, - {9742, 9883, 9884}, - {3362, 9888, 9889}, - {9883, 9893, 9893}, - {5711, 9899, 9910}, - {7806, 9915, 9915}, - {9120, 9919, 9919}, - {9715, 9925, 9934}, - {2580, 9938, 9938}, - {4907, 9942, 9944}, - {6239, 9953, 9954}, - {6961, 9963, 9963}, - {5295, 9967, 9968}, - {1915, 9972, 9973}, - {3426, 9983, 9985}, - {9875, 9994, 9995}, - {6942, 9999, 9999}, - {6621, 10005, 10005}, - {7589, 10010, 10012}, - {9286, 10020, 10020}, - {838, 10024, 10024}, - {9980, 10028, 10031}, - {9994, 10035, 10041}, - {2702, 10048, 10051}, - {2621, 10059, 10059}, - {10054, 10065, 10065}, - {8612, 10073, 10074}, - {7033, 10078, 10078}, - {916, 10082, 10082}, - {10035, 10086, 10087}, - {8613, 10097, 10097}, - {9919, 10107, 10108}, - {6133, 10114, 10115}, - {10059, 10119, 10119}, - {10065, 10126, 10127}, - {7732, 10131, 10131}, - {7155, 10135, 10136}, - {6728, 10140, 10140}, - {6162, 10144, 10145}, - {4724, 10150, 10150}, - {1665, 10154, 10154}, - {10126, 10163, 10163}, - {9783, 10168, 10168}, - {1715, 10172, 10173}, - {7152, 10177, 10182}, - {8760, 10187, 10187}, - {7829, 10191, 10191}, - {9679, 10196, 10196}, - {9369, 10201, 10201}, - {2928, 10206, 10208}, - {6951, 10214, 10217}, - {5633, 10221, 10221}, - {7199, 10225, 10225}, - {10118, 10230, 10231}, - {9999, 10235, 10236}, - {10045, 10240, 10249}, - {5565, 10256, 10256}, - {9866, 10261, 10261}, - {10163, 10268, 10268}, - {9869, 10272, 10272}, - {9789, 10276, 10283}, - {10235, 10287, 10288}, - {10214, 10298, 10299}, - {6971, 10303, 10303}, - {3346, 10307, 10307}, - {10185, 10311, 10312}, - {9993, 10318, 10320}, - {2779, 10332, 10334}, - {1726, 10338, 10338}, - {741, 10354, 10360}, - {10230, 10372, 10373}, - {10260, 10384, 10385}, - {10131, 10389, 10398}, - {6946, 10406, 10409}, - {10158, 10413, 10420}, - 
{10123, 10424, 10424}, - {6157, 10428, 10429}, - {4518, 10434, 10434}, - {9893, 10438, 10438}, - {9865, 10442, 10446}, - {7558, 10454, 10454}, - {10434, 10460, 10460}, - {10064, 10466, 10468}, - {2703, 10472, 10474}, - {9751, 10478, 10479}, - {6714, 10485, 10485}, - {8020, 10490, 10490}, - {10303, 10494, 10494}, - {3521, 10499, 10500}, - {9281, 10513, 10515}, - {6028, 10519, 10523}, - {9387, 10527, 10527}, - {7614, 10531, 10531}, - {3611, 10536, 10536}, - {9162, 10540, 10540}, - {10081, 10546, 10547}, - {10034, 10560, 10562}, - {6726, 10567, 10571}, - {8237, 10575, 10575}, - {10438, 10579, 10583}, - {10140, 10587, 10587}, - {5784, 10592, 10592}, - {9819, 10597, 10600}, - {10567, 10604, 10608}, - {9335, 10613, 10613}, - {8300, 10617, 10617}, - {10575, 10621, 10621}, - {9678, 10625, 10626}, - {9962, 10632, 10633}, - {10535, 10637, 10638}, - {8199, 10642, 10642}, - {10372, 10647, 10648}, - {10637, 10656, 10657}, - {10579, 10667, 10668}, - {10465, 10677, 10680}, - {6702, 10684, 10685}, - {10073, 10691, 10692}, - {4505, 10696, 10697}, - {9042, 10701, 10701}, - {6460, 10705, 10706}, - {10010, 10714, 10716}, - {10656, 10720, 10722}, - {7282, 10727, 10729}, - {2327, 10733, 10733}, - {2491, 10740, 10741}, - {10704, 10748, 10750}, - {6465, 10754, 10754}, - {10647, 10758, 10759}, - {10424, 10763, 10763}, - {10748, 10776, 10776}, - {10546, 10780, 10781}, - {10758, 10785, 10786}, - {10287, 10790, 10797}, - {10785, 10801, 10807}, - {10240, 10811, 10826}, - {9509, 10830, 10830}, - {2579, 10836, 10838}, - {9801, 10843, 10845}, - {7555, 10849, 10850}, - {10776, 10860, 10865}, - {8023, 10869, 10869}, - {10046, 10876, 10884}, - {10253, 10888, 10892}, - {9941, 10897, 10897}, - {7898, 10901, 10905}, - {6725, 10909, 10913}, - {10757, 10921, 10923}, - {10160, 10931, 10931}, - {10916, 10935, 10942}, - {10261, 10946, 10946}, - {10318, 10952, 10954}, - {5911, 10959, 10961}, - {10801, 10965, 10966}, - {10946, 10970, 10977}, - {10592, 10982, 10984}, - {9913, 10988, 10990}, - {8510, 10994, 
10996}, - {9419, 11000, 11001}, - {6765, 11006, 11007}, - {10725, 11011, 11011}, - {5537, 11017, 11019}, - {9208, 11024, 11025}, - {5850, 11030, 11030}, - {9610, 11034, 11036}, - {8846, 11041, 11047}, - {9697, 11051, 11051}, - {1622, 11055, 11058}, - {2370, 11062, 11062}, - {8393, 11067, 11067}, - {9756, 11071, 11071}, - {10172, 11076, 11076}, - {27, 11081, 11081}, - {7357, 11087, 11092}, - {8151, 11104, 11106}, - {6115, 11110, 11110}, - {10667, 11114, 11115}, - {11099, 11121, 11123}, - {10705, 11127, 11127}, - {8938, 11131, 11131}, - {11114, 11135, 11136}, - {1390, 11140, 11141}, - {10964, 11146, 11148}, - {11140, 11152, 11155}, - {9813, 11159, 11166}, - {624, 11171, 11172}, - {3118, 11177, 11179}, - {11029, 11184, 11186}, - {10186, 11190, 11190}, - {10306, 11196, 11196}, - {8665, 11201, 11201}, - {7382, 11205, 11205}, - {1100, 11210, 11210}, - {2337, 11216, 11217}, - {1609, 11221, 11223}, - {5763, 11228, 11229}, - {5220, 11233, 11233}, - {11061, 11241, 11241}, - {10617, 11246, 11246}, - {11190, 11250, 11251}, - {10144, 11255, 11256}, - {11232, 11260, 11260}, - {857, 11264, 11265}, - {10994, 11269, 11271}, - {3879, 11280, 11281}, - {11184, 11287, 11289}, - {9611, 11293, 11295}, - {11250, 11299, 11299}, - {4495, 11304, 11304}, - {7574, 11308, 11309}, - {9814, 11315, 11317}, - {1713, 11321, 11324}, - {1905, 11328, 11328}, - {8745, 11335, 11340}, - {8883, 11351, 11351}, - {8119, 11358, 11358}, - {1842, 11363, 11364}, - {11237, 11368, 11368}, - {8814, 11373, 11374}, - {5684, 11378, 11378}, - {11011, 11382, 11382}, - {6520, 11389, 11389}, - {11183, 11393, 11396}, - {1790, 11404, 11404}, - {9536, 11408, 11408}, - {11298, 11418, 11419}, - {3929, 11425, 11425}, - {5588, 11429, 11429}, - {8476, 11436, 11436}, - {4096, 11440, 11442}, - {11084, 11446, 11454}, - {10603, 11458, 11463}, - {7332, 11472, 11474}, - {7611, 11483, 11486}, - {4836, 11490, 11491}, - {10024, 11495, 11495}, - {4917, 11501, 11506}, - {6486, 11510, 11512}, - {11269, 11516, 11518}, - {3603, 11522, 11525}, 
- {11126, 11535, 11535}, - {11418, 11539, 11541}, - {11408, 11545, 11545}, - {9021, 11549, 11552}, - {6745, 11557, 11557}, - {5118, 11561, 11564}, - {7590, 11568, 11569}, - {4426, 11573, 11578}, - {9790, 11582, 11583}, - {6447, 11587, 11587}, - {10229, 11591, 11594}, - {10457, 11598, 11598}, - {10168, 11604, 11604}, - {10543, 11608, 11608}, - {7404, 11612, 11612}, - {11127, 11616, 11616}, - {3337, 11620, 11620}, - {11501, 11624, 11628}, - {4543, 11633, 11635}, - {8449, 11642, 11642}, - {4943, 11646, 11648}, - {10526, 11652, 11654}, - {11620, 11659, 11659}, - {8927, 11664, 11669}, - {532, 11673, 11673}, - {10513, 11677, 11679}, - {10428, 11683, 11683}, - {10999, 11689, 11690}, - {9469, 11695, 11695}, - {3606, 11699, 11699}, - {9560, 11708, 11709}, - {1564, 11714, 11714}, - {10527, 11718, 11718}, - {3071, 11723, 11726}, - {11590, 11731, 11732}, - {6605, 11737, 11737}, - {11624, 11741, 11745}, - {7822, 11749, 11752}, - {5269, 11757, 11758}, - {1339, 11767, 11767}, - {1363, 11771, 11773}, - {3704, 11777, 11777}, - {10952, 11781, 11783}, - {6764, 11793, 11795}, - {8675, 11800, 11800}, - {9963, 11804, 11804}, - {11573, 11808, 11809}, - {9548, 11813, 11813}, - {11591, 11817, 11818}, - {11446, 11822, 11822}, - {9224, 11828, 11828}, - {3158, 11836, 11836}, - {10830, 11840, 11840}, - {7234, 11846, 11846}, - {11299, 11850, 11850}, - {11544, 11854, 11855}, - {11498, 11859, 11859}, - {10993, 11865, 11868}, - {9720, 11872, 11878}, - {10489, 11882, 11890}, - {11712, 11898, 11904}, - {11516, 11908, 11910}, - {11568, 11914, 11915}, - {10177, 11919, 11924}, - {11363, 11928, 11929}, - {10494, 11933, 11933}, - {9870, 11937, 11938}, - {9427, 11942, 11942}, - {11481, 11949, 11949}, - {6030, 11955, 11957}, - {11718, 11961, 11961}, - {10531, 11965, 11983}, - {5126, 11987, 11987}, - {7515, 11991, 11991}, - {10646, 11996, 11997}, - {2947, 12001, 12001}, - {9582, 12009, 12010}, - {6202, 12017, 12018}, - {11714, 12022, 12022}, - {9235, 12033, 12037}, - {9721, 12041, 12044}, - {11932, 12051, 
12052}, - {12040, 12056, 12056}, - {12051, 12060, 12060}, - {11601, 12066, 12066}, - {8426, 12070, 12070}, - {4053, 12077, 12077}, - {4262, 12081, 12081}, - {9761, 12086, 12088}, - {11582, 12092, 12093}, - {10965, 12097, 12098}, - {11803, 12103, 12104}, - {11933, 12108, 12109}, - {10688, 12117, 12117}, - {12107, 12125, 12126}, - {6774, 12130, 12132}, - {6286, 12137, 12137}, - {9543, 12141, 12141}, - {12097, 12145, 12146}, - {10790, 12150, 12150}, - {10125, 12154, 12156}, - {12125, 12164, 12164}, - {12064, 12168, 12172}, - {10811, 12178, 12188}, - {12092, 12192, 12193}, - {10058, 12197, 12198}, - {11611, 12211, 12212}, - {3459, 12216, 12216}, - {10291, 12225, 12228}, - {12191, 12232, 12234}, - {12145, 12238, 12238}, - {12001, 12242, 12250}, - {3840, 12255, 12255}, - {12216, 12259, 12259}, - {674, 12272, 12272}, - {12141, 12276, 12276}, - {10766, 12280, 12280}, - {11545, 12284, 12284}, - {6496, 12290, 12290}, - {11381, 12294, 12295}, - {603, 12302, 12303}, - {12276, 12308, 12308}, - {11850, 12313, 12314}, - {565, 12319, 12319}, - {9351, 12324, 12324}, - {11822, 12328, 12328}, - {2691, 12333, 12334}, - {11840, 12338, 12338}, - {11070, 12343, 12343}, - {9510, 12347, 12347}, - {11024, 12352, 12353}, - {7173, 12359, 12359}, - {517, 12363, 12363}, - {6311, 12367, 12368}, - {11367, 12372, 12373}, - {12008, 12377, 12377}, - {11372, 12382, 12384}, - {11358, 12391, 12392}, - {11382, 12396, 12396}, - {6882, 12400, 12401}, - {11246, 12405, 12405}, - {8359, 12409, 12412}, - {10154, 12418, 12418}, - {12016, 12425, 12426}, - {8972, 12434, 12435}, - {10478, 12439, 12440}, - {12395, 12449, 12449}, - {11612, 12454, 12454}, - {12347, 12458, 12458}, - {10700, 12466, 12467}, - {3637, 12471, 12476}, - {1042, 12480, 12481}, - {6747, 12488, 12488}, - {12396, 12492, 12493}, - {9420, 12497, 12497}, - {11285, 12501, 12510}, - {4470, 12515, 12515}, - {9374, 12519, 12519}, - {11293, 12528, 12528}, - {2058, 12534, 12535}, - {6521, 12539, 12539}, - {12492, 12543, 12543}, - {3043, 12547, 12547}, - 
{2982, 12551, 12553}, - {11030, 12557, 12563}, - {7636, 12568, 12568}, - {9639, 12572, 12572}, - {12543, 12576, 12576}, - {5989, 12580, 12583}, - {11051, 12587, 12587}, - {1061, 12592, 12594}, - {12313, 12599, 12601}, - {11846, 12605, 12605}, - {12576, 12609, 12609}, - {11040, 12618, 12625}, - {12479, 12629, 12629}, - {6903, 12633, 12633}, - {12322, 12639, 12639}, - {12253, 12643, 12645}, - {5594, 12651, 12651}, - {12522, 12655, 12655}, - {11703, 12659, 12659}, - {1377, 12665, 12665}, - {8022, 12669, 12669}, - {12280, 12674, 12674}, - {9023, 12680, 12681}, - {12328, 12685, 12685}, - {3085, 12689, 12693}, - {4700, 12698, 12698}, - {10224, 12702, 12702}, - {8781, 12706, 12706}, - {1651, 12710, 12710}, - {12458, 12714, 12714}, - {12005, 12718, 12721}, - {11908, 12725, 12726}, - {8202, 12733, 12733}, - {11708, 12739, 12740}, - {12599, 12744, 12745}, - {12284, 12749, 12749}, - {5285, 12756, 12756}, - {12055, 12775, 12777}, - {6919, 12782, 12782}, - {12242, 12786, 12786}, - {12009, 12790, 12790}, - {9628, 12794, 12796}, - {11354, 12801, 12802}, - {10225, 12806, 12807}, - {579, 12813, 12813}, - {8935, 12817, 12822}, - {8753, 12827, 12829}, - {11006, 12835, 12835}, - {858, 12841, 12845}, - {476, 12849, 12849}, - {7667, 12854, 12854}, - {12760, 12860, 12871}, - {11677, 12875, 12877}, - {12714, 12881, 12881}, - {12731, 12885, 12890}, - {7108, 12894, 12896}, - {1165, 12900, 12900}, - {4021, 12906, 12906}, - {10829, 12910, 12911}, - {12331, 12915, 12915}, - {8887, 12919, 12921}, - {11639, 12925, 12925}, - {7964, 12929, 12929}, - {12528, 12937, 12937}, - {8148, 12941, 12941}, - {12770, 12948, 12950}, - {12609, 12954, 12954}, - {12685, 12958, 12958}, - {2803, 12962, 12962}, - {9561, 12966, 12966}, - {6671, 12972, 12973}, - {12056, 12977, 12977}, - {6380, 12981, 12981}, - {12048, 12985, 12985}, - {11961, 12989, 12993}, - {3368, 12997, 12999}, - {6634, 13004, 13004}, - {6775, 13009, 13010}, - {12136, 13014, 13019}, - {10341, 13023, 13023}, - {13002, 13027, 13027}, - {10587, 13031, 
13031}, - {10307, 13035, 13035}, - {12736, 13039, 13039}, - {12744, 13043, 13044}, - {6175, 13048, 13048}, - {9702, 13053, 13054}, - {662, 13059, 13061}, - {12718, 13065, 13068}, - {12893, 13072, 13075}, - {8299, 13086, 13091}, - {12604, 13095, 13096}, - {12848, 13100, 13101}, - {12749, 13105, 13105}, - {12526, 13109, 13114}, - {9173, 13122, 13122}, - {12769, 13128, 13128}, - {13038, 13132, 13132}, - {12725, 13136, 13137}, - {12639, 13146, 13146}, - {9711, 13150, 13151}, - {12137, 13155, 13155}, - {13039, 13159, 13159}, - {4681, 13163, 13164}, - {12954, 13168, 13168}, - {13158, 13175, 13176}, - {13105, 13180, 13180}, - {10754, 13184, 13184}, - {13167, 13188, 13188}, - {12658, 13192, 13192}, - {4294, 13199, 13200}, - {11682, 13204, 13205}, - {11695, 13209, 13209}, - {11076, 13214, 13214}, - {12232, 13218, 13218}, - {9399, 13223, 13224}, - {12880, 13228, 13229}, - {13048, 13234, 13234}, - {9701, 13238, 13239}, - {13209, 13243, 13243}, - {3658, 13248, 13248}, - {3698, 13252, 13254}, - {12237, 13260, 13260}, - {8872, 13266, 13266}, - {12957, 13272, 13273}, - {1393, 13281, 13281}, - {2013, 13285, 13288}, - {4244, 13296, 13299}, - {9428, 13303, 13303}, - {12702, 13307, 13307}, - {13078, 13311, 13311}, - {6071, 13315, 13315}, - {3061, 13319, 13319}, - {2051, 13324, 13324}, - {11560, 13328, 13331}, - {6584, 13336, 13336}, - {8482, 13340, 13340}, - {5331, 13344, 13344}, - {4171, 13348, 13348}, - {8501, 13352, 13352}, - {9219, 13356, 13356}, - {9473, 13360, 13363}, - {12881, 13367, 13367}, - {13065, 13371, 13375}, - {2979, 13379, 13384}, - {1518, 13388, 13388}, - {11177, 13392, 13392}, - {9457, 13398, 13398}, - {12293, 13407, 13410}, - {3697, 13414, 13417}, - {10338, 13425, 13425}, - {13367, 13429, 13429}, - {11074, 13433, 13437}, - {4201, 13441, 13443}, - {1812, 13447, 13448}, - {13360, 13452, 13456}, - {13188, 13463, 13463}, - {9732, 13470, 13470}, - {11332, 13477, 13477}, - {9918, 13487, 13487}, - {6337, 13497, 13497}, - {13429, 13501, 13501}, - {11413, 13505, 13505}, - 
{4685, 13512, 13513}, - {13136, 13517, 13519}, - {7416, 13528, 13530}, - {12929, 13534, 13534}, - {11110, 13539, 13539}, - {11521, 13543, 13543}, - {12825, 13553, 13553}, - {13447, 13557, 13558}, - {12299, 13562, 13563}, - {9003, 13570, 13570}, - {12500, 13577, 13577}, - {13501, 13581, 13581}, - {9392, 13586, 13586}, - {12454, 13590, 13590}, - {6189, 13595, 13595}, - {13053, 13599, 13599}, - {11881, 13604, 13604}, - {13159, 13608, 13608}, - {4894, 13612, 13612}, - {13221, 13621, 13621}, - {8950, 13625, 13625}, - {13533, 13629, 13629}, - {9633, 13633, 13633}, - {7892, 13637, 13639}, - {13581, 13643, 13643}, - {13616, 13647, 13649}, - {12794, 13653, 13654}, - {8919, 13659, 13659}, - {9674, 13663, 13663}, - {13577, 13668, 13668}, - {12966, 13672, 13672}, - {12659, 13676, 13683}, - {6124, 13688, 13688}, - {9225, 13693, 13695}, - {11833, 13702, 13702}, - {12904, 13709, 13717}, - {13647, 13721, 13722}, - {11687, 13726, 13727}, - {12434, 13731, 13732}, - {12689, 13736, 13742}, - {13168, 13746, 13746}, - {6151, 13751, 13752}, - {11821, 13756, 13757}, - {6467, 13764, 13764}, - {5730, 13769, 13769}, - {5136, 13780, 13780}, - {724, 13784, 13785}, - {13517, 13789, 13791}, - {640, 13795, 13796}, - {7721, 13800, 13802}, - {11121, 13806, 13807}, - {5791, 13811, 13815}, - {12894, 13819, 13819}, - {11100, 13824, 13824}, - {7011, 13830, 13830}, - {7129, 13834, 13837}, - {13833, 13841, 13841}, - {11276, 13847, 13847}, - {13621, 13853, 13853}, - {13589, 13862, 13863}, - {12989, 13867, 13867}, - {12789, 13871, 13871}, - {1239, 13875, 13875}, - {4675, 13879, 13881}, - {4686, 13885, 13885}, - {707, 13889, 13889}, - {5449, 13897, 13898}, - {13867, 13902, 13903}, - {10613, 13908, 13908}, - {13789, 13912, 13914}, - {4451, 13918, 13919}, - {9200, 13924, 13924}, - {2011, 13930, 13930}, - {11433, 13934, 13936}, - {4695, 13942, 13943}, - {9435, 13948, 13951}, - {13688, 13955, 13957}, - {11694, 13961, 13962}, - {5712, 13966, 13966}, - {5991, 13970, 13972}, - {13477, 13976, 13976}, - {10213, 
13987, 13987}, - {11839, 13991, 13993}, - {12272, 13997, 13997}, - {6206, 14001, 14001}, - {13179, 14006, 14007}, - {2939, 14011, 14011}, - {12972, 14016, 14017}, - {13918, 14021, 14022}, - {7436, 14026, 14027}, - {7678, 14032, 14034}, - {13586, 14040, 14040}, - {13347, 14044, 14044}, - {13109, 14048, 14051}, - {9244, 14055, 14057}, - {13315, 14061, 14061}, - {13276, 14067, 14067}, - {11435, 14073, 14074}, - {13853, 14078, 14078}, - {13452, 14082, 14082}, - {14044, 14087, 14087}, - {4440, 14091, 14095}, - {4479, 14100, 14103}, - {9395, 14107, 14109}, - {6834, 14119, 14119}, - {10458, 14123, 14124}, - {1429, 14129, 14129}, - {8443, 14135, 14135}, - {10365, 14140, 14140}, - {5267, 14145, 14145}, - {11834, 14151, 14153}, -} diff --git a/vendor/github.com/golang/snappy/misc/main.cpp b/vendor/github.com/golang/snappy/misc/main.cpp deleted file mode 100644 index 24a3d9a9..00000000 --- a/vendor/github.com/golang/snappy/misc/main.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* -This is a C version of the cmd/snappytool Go program. - -To build the snappytool binary: -g++ main.cpp /usr/lib/libsnappy.a -o snappytool -or, if you have built the C++ snappy library from source: -g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool -after running "make" from your snappy checkout directory. -*/ - -#include -#include -#include -#include - -#include "snappy.h" - -#define N 1000000 - -char dst[N]; -char src[N]; - -int main(int argc, char** argv) { - // Parse args. - if (argc != 2) { - fprintf(stderr, "exactly one of -d or -e must be given\n"); - return 1; - } - bool decode = strcmp(argv[1], "-d") == 0; - bool encode = strcmp(argv[1], "-e") == 0; - if (decode == encode) { - fprintf(stderr, "exactly one of -d or -e must be given\n"); - return 1; - } - - // Read all of stdin into src[:s]. 
- size_t s = 0; - while (1) { - if (s == N) { - fprintf(stderr, "input too large\n"); - return 1; - } - ssize_t n = read(0, src+s, N-s); - if (n == 0) { - break; - } - if (n < 0) { - fprintf(stderr, "read error: %s\n", strerror(errno)); - // TODO: handle EAGAIN, EINTR? - return 1; - } - s += n; - } - - // Encode or decode src[:s] to dst[:d], and write to stdout. - size_t d = 0; - if (encode) { - if (N < snappy::MaxCompressedLength(s)) { - fprintf(stderr, "input too large after encoding\n"); - return 1; - } - snappy::RawCompress(src, s, dst, &d); - } else { - if (!snappy::GetUncompressedLength(src, s, &d)) { - fprintf(stderr, "could not get uncompressed length\n"); - return 1; - } - if (N < d) { - fprintf(stderr, "input too large after decoding\n"); - return 1; - } - if (!snappy::RawUncompress(src, s, dst)) { - fprintf(stderr, "input was not valid Snappy-compressed data\n"); - return 1; - } - } - write(1, dst, d); - return 0; -} diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go deleted file mode 100644 index 2712710d..00000000 --- a/vendor/github.com/golang/snappy/snappy_test.go +++ /dev/null @@ -1,1353 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package snappy - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") - benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") -) - -// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by -// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on -// this GOARCH. There is more than one valid encoding of any given input, and -// there is more than one good algorithm along the frontier of trading off -// throughput for output size. Nonetheless, we presume that the C++ encoder's -// algorithm is a good one and has been tested on a wide range of inputs, so -// matching that exactly should mean that the Go encoder's algorithm is also -// good, without needing to gather our own corpus of test data. -// -// The exact algorithm used by the C++ code is potentially endian dependent, as -// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes -// at a time. The Go implementation is endian agnostic, in that its output is -// the same (as little-endian C++ code), regardless of the CPU's endianness. -// -// Thus, when comparing Go's output to C++ output generated beforehand, such as -// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- -// endian system, we can run that test regardless of the runtime.GOARCH value. -// -// When comparing Go's output to dynamically generated C++ output, i.e. the -// result of fork/exec'ing a C++ program, we can run that test only on -// little-endian systems, because the C++ output might be different on -// big-endian systems. 
The runtime package doesn't export endianness per se, -// but we can restrict this match-C++ test to common little-endian systems. -const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" - -func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { - got := maxEncodedLenOfMaxBlockSize - want := MaxEncodedLen(maxBlockSize) - if got != want { - t.Fatalf("got %d, want %d", got, want) - } -} - -func cmp(a, b []byte) error { - if bytes.Equal(a, b) { - return nil - } - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func roundtrip(b, ebuf, dbuf []byte) error { - d, err := Decode(dbuf, Encode(ebuf, b)) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if err := cmp(d, b); err != nil { - return fmt.Errorf("roundtrip mismatch: %v", err) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(1)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Intn(256)) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); 
err != nil { - t.Fatal(err) - } - } -} - -func TestInvalidVarint(t *testing.T) { - testCases := []struct { - desc string - input string - }{{ - "invalid varint, final byte has continuation bit set", - "\xff", - }, { - "invalid varint, value overflows uint64", - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", - }, { - // https://github.com/google/snappy/blob/master/format_description.txt - // says that "the stream starts with the uncompressed length [as a - // varint] (up to a maximum of 2^32 - 1)". - "valid varint (as uint64), but value overflows uint32", - "\x80\x80\x80\x80\x10", - }} - - for _, tc := range testCases { - input := []byte(tc.input) - if _, err := DecodedLen(input); err != ErrCorrupt { - t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) - } - if _, err := Decode(nil, input); err != ErrCorrupt { - t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) - } - } -} - -func TestDecode(t *testing.T) { - lit40Bytes := make([]byte, 40) - for i := range lit40Bytes { - lit40Bytes[i] = byte(i) - } - lit40 := string(lit40Bytes) - - testCases := []struct { - desc string - input string - want string - wantErr error - }{{ - `decodedLen=0; valid input`, - "\x00", - "", - nil, - }, { - `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, - "\x03" + "\x08\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, - "\x02" + "\x08\xff\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, - "\x03" + "\x08\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, - "\x28" + "\x9c" + lit40, - lit40, - nil, - }, { - `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, - "\x01" + "\xf0", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, - "\x03" + "\xf0\x02\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - 
`decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, - "\x01" + "\xf4\x00", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`, - "\x03" + "\xf4\x02\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, - "\x01" + "\xf8\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, - "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, - "\x01" + "\xfc\x00\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, - "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, - "\x04" + "\xfc\x02\x00\x00\x00\xff", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, - "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, - "\x04" + "\x01", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, - "\x04" + "\x02\x00", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, - "\x04" + "\x03\x00\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, - "\x04" + "\x0cabcd", - "abcd", - nil, - }, { - `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, - "\x0d" + "\x0cabcd" + "\x15\x04", - "abcdabcdabcda", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, - "\x08" + "\x0cabcd" + "\x01\x04", - "abcdabcd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, - 
"\x08" + "\x0cabcd" + "\x01\x02", - "abcdcdcd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`, - "\x08" + "\x0cabcd" + "\x01\x01", - "abcddddd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, - "\x08" + "\x0cabcd" + "\x01\x00", - "", - ErrCorrupt, - }, { - `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, - "\x09" + "\x0cabcd" + "\x01\x04", - "", - ErrCorrupt, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, - "\x08" + "\x0cabcd" + "\x01\x05", - "", - ErrCorrupt, - }, { - `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, - "\x07" + "\x0cabcd" + "\x01\x04", - "", - ErrCorrupt, - }, { - `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, - "\x06" + "\x0cabcd" + "\x06\x03\x00", - "abcdbc", - nil, - }, { - `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, - "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", - "abcdbc", - nil, - }} - - const ( - // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are - // not present in either the input or the output. It is written to dBuf - // to check that Decode does not write bytes past the end of - // dBuf[:dLen]. - // - // The magic number 37 was chosen because it is prime. A more 'natural' - // number like 32 might lead to a false negative if, for example, a - // byte was incorrectly copied 4*8 bytes later. 
- notPresentBase = 0xa0 - notPresentLen = 37 - ) - - var dBuf [100]byte -loop: - for i, tc := range testCases { - input := []byte(tc.input) - for _, x := range input { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) - continue loop - } - } - - dLen, n := binary.Uvarint(input) - if n <= 0 { - t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) - continue - } - if dLen > uint64(len(dBuf)) { - t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) - continue - } - - for j := range dBuf { - dBuf[j] = byte(notPresentBase + j%notPresentLen) - } - g, gotErr := Decode(dBuf[:], input) - if got := string(g); got != tc.want || gotErr != tc.wantErr { - t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", - i, tc.desc, got, gotErr, tc.want, tc.wantErr) - continue - } - for j, x := range dBuf { - if uint64(j) < dLen { - continue - } - if w := byte(notPresentBase + j%notPresentLen); x != w { - t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", - i, tc.desc, j, x, w, dBuf) - continue loop - } - } - } -} - -func TestDecodeCopy4(t *testing.T) { - dots := strings.Repeat(".", 65536) - - input := strings.Join([]string{ - "\x89\x80\x04", // decodedLen = 65545. - "\x0cpqrs", // 4-byte literal "pqrs". - "\xf4\xff\xff" + dots, // 65536-byte literal dots. - "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. - }, "") - - gotBytes, err := Decode(nil, []byte(input)) - if err != nil { - t.Fatal(err) - } - got := string(gotBytes) - want := "pqrs" + dots + "pqrs." - if len(got) != len(want) { - t.Fatalf("got %d bytes, want %d", len(got), len(want)) - } - if got != want { - for i := 0; i < len(got); i++ { - if g, w := got[i], want[i]; g != w { - t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) - } - } - } -} - -// TestDecodeLengthOffset tests decoding an encoding of the form literal + -// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". -func TestDecodeLengthOffset(t *testing.T) { - const ( - prefix = "abcdefghijklmnopqr" - suffix = "ABCDEFGHIJKLMNOPQR" - - // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are - // not present in either the input or the output. It is written to - // gotBuf to check that Decode does not write bytes past the end of - // gotBuf[:totalLen]. - // - // The magic number 37 was chosen because it is prime. A more 'natural' - // number like 32 might lead to a false negative if, for example, a - // byte was incorrectly copied 4*8 bytes later. - notPresentBase = 0xa0 - notPresentLen = 37 - ) - var gotBuf, wantBuf, inputBuf [128]byte - for length := 1; length <= 18; length++ { - for offset := 1; offset <= 18; offset++ { - loop: - for suffixLen := 0; suffixLen <= 18; suffixLen++ { - totalLen := len(prefix) + length + suffixLen - - inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) - inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) - inputLen++ - inputLen += copy(inputBuf[inputLen:], prefix) - inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) - inputBuf[inputLen+1] = byte(offset) - inputBuf[inputLen+2] = 0x00 - inputLen += 3 - if suffixLen > 0 { - inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) - inputLen++ - inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) - } - input := inputBuf[:inputLen] - - for i := range gotBuf { - gotBuf[i] = byte(notPresentBase + i%notPresentLen) - } - got, err := Decode(gotBuf[:], input) - if err != nil { - t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) - continue - } - - wantLen := 0 - wantLen += copy(wantBuf[wantLen:], prefix) - for i := 0; i < length; i++ { - wantBuf[wantLen] = wantBuf[wantLen-offset] - wantLen++ - } - wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) - want := wantBuf[:wantLen] - - for _, x := range input { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("length=%d, 
offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", - length, offset, suffixLen, x, input) - continue loop - } - } - for i, x := range gotBuf { - if i < totalLen { - continue - } - if w := byte(notPresentBase + i%notPresentLen); x != w { - t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ - "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", - length, offset, suffixLen, totalLen, i, x, w, gotBuf) - continue loop - } - } - for _, x := range want { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", - length, offset, suffixLen, x, want) - continue loop - } - } - - if !bytes.Equal(got, want) { - t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", - length, offset, suffixLen, input, got, want) - continue - } - } - } - } -} - -const ( - goldenText = "Mark.Twain-Tom.Sawyer.txt" - goldenCompressed = goldenText + ".rawsnappy" -) - -func TestDecodeGoldenInput(t *testing.T) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - got, err := Decode(nil, src) - if err != nil { - t.Fatalf("Decode: %v", err) - } - want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - if err := cmp(got, want); err != nil { - t.Fatal(err) - } -} - -func TestEncodeGoldenInput(t *testing.T) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - got := Encode(nil, src) - want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - if err := cmp(got, want); err != nil { - t.Fatal(err) - } -} - -func TestExtendMatchGoldenInput(t *testing.T) { - tDir := 
filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - for i, tc := range extendMatchGoldenTestCases { - got := extendMatch(src, tc.i, tc.j) - if got != tc.want { - t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", - i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) - } - } -} - -func TestExtendMatch(t *testing.T) { - // ref is a simple, reference implementation of extendMatch. - ref := func(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j - } - - nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} - for yIndex := 40; yIndex > 30; yIndex-- { - xxx := bytes.Repeat([]byte("x"), 40) - if yIndex < len(xxx) { - xxx[yIndex] = 'y' - } - for _, i := range nums { - for _, j := range nums { - if i >= j { - continue - } - got := extendMatch(xxx, i, j) - want := ref(xxx, i, j) - if got != want { - t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) - } - } - } - } -} - -const snappytoolCmdName = "cmd/snappytool/snappytool" - -func skipTestSameEncodingAsCpp() (msg string) { - if !goEncoderShouldMatchCppEncoder { - return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) - } - if _, err := os.Stat(snappytoolCmdName); err != nil { - return fmt.Sprintf("could not find snappytool: %v", err) - } - return "" -} - -func runTestSameEncodingAsCpp(src []byte) error { - got := Encode(nil, src) - - cmd := exec.Command(snappytoolCmdName, "-e") - cmd.Stdin = bytes.NewReader(src) - want, err := cmd.Output() - if err != nil { - return fmt.Errorf("could not run snappytool: %v", err) - } - return cmp(got, want) -} - -func TestSameEncodingAsCppShortCopies(t *testing.T) { - if msg := skipTestSameEncodingAsCpp(); msg != "" { - t.Skip(msg) - } - src := bytes.Repeat([]byte{'a'}, 20) - for i := 0; i <= len(src); i++ { - 
if err := runTestSameEncodingAsCpp(src[:i]); err != nil { - t.Errorf("i=%d: %v", i, err) - } - } -} - -func TestSameEncodingAsCppLongFiles(t *testing.T) { - if msg := skipTestSameEncodingAsCpp(); msg != "" { - t.Skip(msg) - } - bDir := filepath.FromSlash(*benchdataDir) - failed := false - for i, tf := range testFiles { - if err := downloadBenchmarkFiles(t, tf.filename); err != nil { - t.Fatalf("failed to download testdata: %s", err) - } - data := readFile(t, filepath.Join(bDir, tf.filename)) - if n := tf.sizeLimit; 0 < n && n < len(data) { - data = data[:n] - } - if err := runTestSameEncodingAsCpp(data); err != nil { - t.Errorf("i=%d: %v", i, err) - failed = true - } - } - if failed { - t.Errorf("was the snappytool program built against the C++ snappy library version " + - "d53de187 or later, commited on 2016-04-05? See " + - "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") - } -} - -// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm -// described in decode_amd64.s and its claim of a 10 byte overrun worst case. 
-func TestSlowForwardCopyOverrun(t *testing.T) { - const base = 100 - - for length := 1; length < 18; length++ { - for offset := 1; offset < 18; offset++ { - highWaterMark := base - d := base - l := length - o := offset - - // makeOffsetAtLeast8 - for o < 8 { - if end := d + 8; highWaterMark < end { - highWaterMark = end - } - l -= o - d += o - o += o - } - - // fixUpSlowForwardCopy - a := d - d += l - - // finishSlowForwardCopy - for l > 0 { - if end := a + 8; highWaterMark < end { - highWaterMark = end - } - a += 8 - l -= 8 - } - - dWant := base + length - overrun := highWaterMark - dWant - if d != dWant || overrun < 0 || 10 < overrun { - t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", - length, offset, d, overrun, dWant) - } - } - } -} - -// TestEncodeNoiseThenRepeats encodes input for which the first half is very -// incompressible and the second half is very compressible. The encoded form's -// length should be closer to 50% of the original length than 100%. -func TestEncodeNoiseThenRepeats(t *testing.T) { - for _, origLen := range []int{256 * 1024, 2048 * 1024} { - src := make([]byte, origLen) - rng := rand.New(rand.NewSource(1)) - firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] - for i := range firstHalf { - firstHalf[i] = uint8(rng.Intn(256)) - } - for i := range secondHalf { - secondHalf[i] = uint8(i >> 8) - } - dst := Encode(nil, src) - if got, want := len(dst), origLen*3/4; got >= want { - t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) - } - } -} - -func TestFramingFormat(t *testing.T) { - // src is comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxBlockSize (64k). 
- src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestWriterGoldenOutput(t *testing.T) { - buf := new(bytes.Buffer) - w := NewBufferedWriter(buf) - defer w.Close() - w.Write([]byte("abcd")) // Not compressible. - w.Flush() - w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. - w.Flush() - // The next chunk is also compressible, but a naive, greedy encoding of the - // overall length 67 copy as a length 64 copy (the longest expressible as a - // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte - // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 - // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 - // (of length 60) and a 2-byte tagCopy1 (of length 7). - w.Write(bytes.Repeat([]byte{'B'}, 68)) - w.Write([]byte("efC")) // Not compressible. - w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. - w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. - w.Write([]byte("g")) // Not compressible. - w.Flush() - - got := buf.String() - want := strings.Join([]string{ - magicChunk, - "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). - "\x68\x10\xe6\xb6", // Checksum. - "\x61\x62\x63\x64", // Uncompressed payload: "abcd". - "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). - "\x5f\xeb\xf2\x10", // Checksum. - "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. 
- "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". - "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. - "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. - "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. - "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). - "\x30\x85\x69\xeb", // Checksum. - "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. - "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". - "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. - "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. - "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". - "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. - "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. - "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". - }, "") - if got != want { - t.Fatalf("\ngot: % x\nwant: % x", got, want) - } -} - -func TestEmitLiteral(t *testing.T) { - testCases := []struct { - length int - want string - }{ - {1, "\x00"}, - {2, "\x04"}, - {59, "\xe8"}, - {60, "\xec"}, - {61, "\xf0\x3c"}, - {62, "\xf0\x3d"}, - {254, "\xf0\xfd"}, - {255, "\xf0\xfe"}, - {256, "\xf0\xff"}, - {257, "\xf4\x00\x01"}, - {65534, "\xf4\xfd\xff"}, - {65535, "\xf4\xfe\xff"}, - {65536, "\xf4\xff\xff"}, - } - - dst := make([]byte, 70000) - nines := bytes.Repeat([]byte{0x99}, 65536) - for _, tc := range testCases { - lit := nines[:tc.length] - n := emitLiteral(dst, lit) - if !bytes.HasSuffix(dst[:n], lit) { - t.Errorf("length=%d: did not end with that many literal bytes", tc.length) - continue - } - got := string(dst[:n-tc.length]) - if got != tc.want { - t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) - continue - } - } -} - -func TestEmitCopy(t *testing.T) { - testCases := []struct { - offset int - length int - want string - }{ - {8, 04, "\x01\x08"}, - {8, 
11, "\x1d\x08"}, - {8, 12, "\x2e\x08\x00"}, - {8, 13, "\x32\x08\x00"}, - {8, 59, "\xea\x08\x00"}, - {8, 60, "\xee\x08\x00"}, - {8, 61, "\xf2\x08\x00"}, - {8, 62, "\xf6\x08\x00"}, - {8, 63, "\xfa\x08\x00"}, - {8, 64, "\xfe\x08\x00"}, - {8, 65, "\xee\x08\x00\x05\x08"}, - {8, 66, "\xee\x08\x00\x09\x08"}, - {8, 67, "\xee\x08\x00\x0d\x08"}, - {8, 68, "\xfe\x08\x00\x01\x08"}, - {8, 69, "\xfe\x08\x00\x05\x08"}, - {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, - - {256, 04, "\x21\x00"}, - {256, 11, "\x3d\x00"}, - {256, 12, "\x2e\x00\x01"}, - {256, 13, "\x32\x00\x01"}, - {256, 59, "\xea\x00\x01"}, - {256, 60, "\xee\x00\x01"}, - {256, 61, "\xf2\x00\x01"}, - {256, 62, "\xf6\x00\x01"}, - {256, 63, "\xfa\x00\x01"}, - {256, 64, "\xfe\x00\x01"}, - {256, 65, "\xee\x00\x01\x25\x00"}, - {256, 66, "\xee\x00\x01\x29\x00"}, - {256, 67, "\xee\x00\x01\x2d\x00"}, - {256, 68, "\xfe\x00\x01\x21\x00"}, - {256, 69, "\xfe\x00\x01\x25\x00"}, - {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, - - {2048, 04, "\x0e\x00\x08"}, - {2048, 11, "\x2a\x00\x08"}, - {2048, 12, "\x2e\x00\x08"}, - {2048, 13, "\x32\x00\x08"}, - {2048, 59, "\xea\x00\x08"}, - {2048, 60, "\xee\x00\x08"}, - {2048, 61, "\xf2\x00\x08"}, - {2048, 62, "\xf6\x00\x08"}, - {2048, 63, "\xfa\x00\x08"}, - {2048, 64, "\xfe\x00\x08"}, - {2048, 65, "\xee\x00\x08\x12\x00\x08"}, - {2048, 66, "\xee\x00\x08\x16\x00\x08"}, - {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, - {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, - {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, - {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, - } - - dst := make([]byte, 1024) - for _, tc := range testCases { - n := emitCopy(dst, tc.offset, tc.length) - got := string(dst[:n]) - if got != tc.want { - t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) - } - } -} - -func TestNewBufferedWriter(t *testing.T) { - // Test all 32 possible sub-sequences of these 5 input slices. 
- // - // Their lengths sum to 400,000, which is over 6 times the Writer ibuf - // capacity: 6 * maxBlockSize is 393,216. - inputs := [][]byte{ - bytes.Repeat([]byte{'a'}, 40000), - bytes.Repeat([]byte{'b'}, 150000), - bytes.Repeat([]byte{'c'}, 60000), - bytes.Repeat([]byte{'d'}, 120000), - bytes.Repeat([]byte{'e'}, 30000), - } -loop: - for i := 0; i < 1< 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. - data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } -func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } -func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -func BenchmarkRandomEncode(b *testing.B) { - rng := rand.New(rand.NewSource(1)) - data := make([]byte, 1<<20) - for i := range data { - data[i] = uint8(rng.Intn(256)) - } - benchEncode(b, data) -} - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. 
-var testFiles = []struct { - label string - filename string - sizeLimit int -}{ - {"html", "html", 0}, - {"urls", "urls.10K", 0}, - {"jpg", "fireworks.jpeg", 0}, - {"jpg_200", "fireworks.jpeg", 200}, - {"pdf", "paper-100k.pdf", 0}, - {"html4", "html_x_4", 0}, - {"txt1", "alice29.txt", 0}, - {"txt2", "asyoulik.txt", 0}, - {"txt3", "lcet10.txt", 0}, - {"txt4", "plrabn12.txt", 0}, - {"pb", "geo.protodata", 0}, - {"gaviota", "kppkn.gtb", 0}, -} - -const ( - // The benchmark data files are at this canonical URL. - benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" -) - -func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { - bDir := filepath.FromSlash(*benchdataDir) - filename := filepath.Join(bDir, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. 
- if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create %s: %s", bDir, err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := benchURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, i int, decode bool) { - if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - bDir := filepath.FromSlash(*benchdataDir) - data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) - if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { - data = data[:n] - } - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } - -func BenchmarkExtendMatch(b *testing.B) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - b.Fatalf("ReadFile: %v", err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, tc := range extendMatchGoldenTestCases { - extendMatch(src, tc.i, tc.j) - } - } -} diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt 
b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt deleted file mode 100644 index 86a18750..00000000 --- a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt +++ /dev/null @@ -1,396 +0,0 @@ -Produced by David Widger. The previous edition was updated by Jose -Menendez. - - - - - - THE ADVENTURES OF TOM SAWYER - BY - MARK TWAIN - (Samuel Langhorne Clemens) - - - - - P R E F A C E - -MOST of the adventures recorded in this book really occurred; one or -two were experiences of my own, the rest those of boys who were -schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but -not from an individual--he is a combination of the characteristics of -three boys whom I knew, and therefore belongs to the composite order of -architecture. - -The odd superstitions touched upon were all prevalent among children -and slaves in the West at the period of this story--that is to say, -thirty or forty years ago. - -Although my book is intended mainly for the entertainment of boys and -girls, I hope it will not be shunned by men and women on that account, -for part of my plan has been to try to pleasantly remind adults of what -they once were themselves, and of how they felt and thought and talked, -and what queer enterprises they sometimes engaged in. - - THE AUTHOR. - -HARTFORD, 1876. - - - - T O M S A W Y E R - - - -CHAPTER I - -"TOM!" - -No answer. - -"TOM!" - -No answer. - -"What's gone with that boy, I wonder? You TOM!" - -No answer. - -The old lady pulled her spectacles down and looked over them about the -room; then she put them up and looked out under them. She seldom or -never looked THROUGH them for so small a thing as a boy; they were her -state pair, the pride of her heart, and were built for "style," not -service--she could have seen through a pair of stove-lids just as well. 
-She looked perplexed for a moment, and then said, not fiercely, but -still loud enough for the furniture to hear: - -"Well, I lay if I get hold of you I'll--" - -She did not finish, for by this time she was bending down and punching -under the bed with the broom, and so she needed breath to punctuate the -punches with. She resurrected nothing but the cat. - -"I never did see the beat of that boy!" - -She went to the open door and stood in it and looked out among the -tomato vines and "jimpson" weeds that constituted the garden. No Tom. -So she lifted up her voice at an angle calculated for distance and -shouted: - -"Y-o-u-u TOM!" - -There was a slight noise behind her and she turned just in time to -seize a small boy by the slack of his roundabout and arrest his flight. - -"There! I might 'a' thought of that closet. What you been doing in -there?" - -"Nothing." - -"Nothing! Look at your hands. And look at your mouth. What IS that -truck?" - -"I don't know, aunt." - -"Well, I know. It's jam--that's what it is. Forty times I've said if -you didn't let that jam alone I'd skin you. Hand me that switch." - -The switch hovered in the air--the peril was desperate-- - -"My! Look behind you, aunt!" - -The old lady whirled round, and snatched her skirts out of danger. The -lad fled on the instant, scrambled up the high board-fence, and -disappeared over it. - -His aunt Polly stood surprised a moment, and then broke into a gentle -laugh. - -"Hang the boy, can't I never learn anything? Ain't he played me tricks -enough like that for me to be looking out for him by this time? But old -fools is the biggest fools there is. Can't learn an old dog new tricks, -as the saying is. But my goodness, he never plays them alike, two days, -and how is a body to know what's coming? He 'pears to know just how -long he can torment me before I get my dander up, and he knows if he -can make out to put me off for a minute or make me laugh, it's all down -again and I can't hit him a lick. 
I ain't doing my duty by that boy, -and that's the Lord's truth, goodness knows. Spare the rod and spile -the child, as the Good Book says. I'm a laying up sin and suffering for -us both, I know. He's full of the Old Scratch, but laws-a-me! he's my -own dead sister's boy, poor thing, and I ain't got the heart to lash -him, somehow. Every time I let him off, my conscience does hurt me so, -and every time I hit him my old heart most breaks. Well-a-well, man -that is born of woman is of few days and full of trouble, as the -Scripture says, and I reckon it's so. He'll play hookey this evening, * -and [* Southwestern for "afternoon"] I'll just be obleeged to make him -work, to-morrow, to punish him. It's mighty hard to make him work -Saturdays, when all the boys is having holiday, but he hates work more -than he hates anything else, and I've GOT to do some of my duty by him, -or I'll be the ruination of the child." - -Tom did play hookey, and he had a very good time. He got back home -barely in season to help Jim, the small colored boy, saw next-day's -wood and split the kindlings before supper--at least he was there in -time to tell his adventures to Jim while Jim did three-fourths of the -work. Tom's younger brother (or rather half-brother) Sid was already -through with his part of the work (picking up chips), for he was a -quiet boy, and had no adventurous, troublesome ways. - -While Tom was eating his supper, and stealing sugar as opportunity -offered, Aunt Polly asked him questions that were full of guile, and -very deep--for she wanted to trap him into damaging revealments. Like -many other simple-hearted souls, it was her pet vanity to believe she -was endowed with a talent for dark and mysterious diplomacy, and she -loved to contemplate her most transparent devices as marvels of low -cunning. Said she: - -"Tom, it was middling warm in school, warn't it?" - -"Yes'm." - -"Powerful warm, warn't it?" - -"Yes'm." - -"Didn't you want to go in a-swimming, Tom?" 
- -A bit of a scare shot through Tom--a touch of uncomfortable suspicion. -He searched Aunt Polly's face, but it told him nothing. So he said: - -"No'm--well, not very much." - -The old lady reached out her hand and felt Tom's shirt, and said: - -"But you ain't too warm now, though." And it flattered her to reflect -that she had discovered that the shirt was dry without anybody knowing -that that was what she had in her mind. But in spite of her, Tom knew -where the wind lay, now. So he forestalled what might be the next move: - -"Some of us pumped on our heads--mine's damp yet. See?" - -Aunt Polly was vexed to think she had overlooked that bit of -circumstantial evidence, and missed a trick. Then she had a new -inspiration: - -"Tom, you didn't have to undo your shirt collar where I sewed it, to -pump on your head, did you? Unbutton your jacket!" - -The trouble vanished out of Tom's face. He opened his jacket. His -shirt collar was securely sewed. - -"Bother! Well, go 'long with you. I'd made sure you'd played hookey -and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a -singed cat, as the saying is--better'n you look. THIS time." - -She was half sorry her sagacity had miscarried, and half glad that Tom -had stumbled into obedient conduct for once. - -But Sidney said: - -"Well, now, if I didn't think you sewed his collar with white thread, -but it's black." - -"Why, I did sew it with white! Tom!" - -But Tom did not wait for the rest. As he went out at the door he said: - -"Siddy, I'll lick you for that." - -In a safe place Tom examined two large needles which were thrust into -the lapels of his jacket, and had thread bound about them--one needle -carried white thread and the other black. He said: - -"She'd never noticed if it hadn't been for Sid. Confound it! sometimes -she sews it with white, and sometimes she sews it with black. I wish to -geeminy she'd stick to one or t'other--I can't keep the run of 'em. But -I bet you I'll lam Sid for that. 
I'll learn him!" - -He was not the Model Boy of the village. He knew the model boy very -well though--and loathed him. - -Within two minutes, or even less, he had forgotten all his troubles. -Not because his troubles were one whit less heavy and bitter to him -than a man's are to a man, but because a new and powerful interest bore -them down and drove them out of his mind for the time--just as men's -misfortunes are forgotten in the excitement of new enterprises. This -new interest was a valued novelty in whistling, which he had just -acquired from a negro, and he was suffering to practise it undisturbed. -It consisted in a peculiar bird-like turn, a sort of liquid warble, -produced by touching the tongue to the roof of the mouth at short -intervals in the midst of the music--the reader probably remembers how -to do it, if he has ever been a boy. Diligence and attention soon gave -him the knack of it, and he strode down the street with his mouth full -of harmony and his soul full of gratitude. He felt much as an -astronomer feels who has discovered a new planet--no doubt, as far as -strong, deep, unalloyed pleasure is concerned, the advantage was with -the boy, not the astronomer. - -The summer evenings were long. It was not dark, yet. Presently Tom -checked his whistle. A stranger was before him--a boy a shade larger -than himself. A new-comer of any age or either sex was an impressive -curiosity in the poor little shabby village of St. Petersburg. This boy -was well dressed, too--well dressed on a week-day. This was simply -astounding. His cap was a dainty thing, his close-buttoned blue cloth -roundabout was new and natty, and so were his pantaloons. He had shoes -on--and it was only Friday. He even wore a necktie, a bright bit of -ribbon. He had a citified air about him that ate into Tom's vitals. The -more Tom stared at the splendid marvel, the higher he turned up his -nose at his finery and the shabbier and shabbier his own outfit seemed -to him to grow. 
Neither boy spoke. If one moved, the other moved--but -only sidewise, in a circle; they kept face to face and eye to eye all -the time. Finally Tom said: - -"I can lick you!" - -"I'd like to see you try it." - -"Well, I can do it." - -"No you can't, either." - -"Yes I can." - -"No you can't." - -"I can." - -"You can't." - -"Can!" - -"Can't!" - -An uncomfortable pause. Then Tom said: - -"What's your name?" - -"'Tisn't any of your business, maybe." - -"Well I 'low I'll MAKE it my business." - -"Well why don't you?" - -"If you say much, I will." - -"Much--much--MUCH. There now." - -"Oh, you think you're mighty smart, DON'T you? I could lick you with -one hand tied behind me, if I wanted to." - -"Well why don't you DO it? You SAY you can do it." - -"Well I WILL, if you fool with me." - -"Oh yes--I've seen whole families in the same fix." - -"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" - -"You can lump that hat if you don't like it. I dare you to knock it -off--and anybody that'll take a dare will suck eggs." - -"You're a liar!" - -"You're another." - -"You're a fighting liar and dasn't take it up." - -"Aw--take a walk!" - -"Say--if you give me much more of your sass I'll take and bounce a -rock off'n your head." - -"Oh, of COURSE you will." - -"Well I WILL." - -"Well why don't you DO it then? What do you keep SAYING you will for? -Why don't you DO it? It's because you're afraid." - -"I AIN'T afraid." - -"You are." - -"I ain't." - -"You are." - -Another pause, and more eying and sidling around each other. Presently -they were shoulder to shoulder. Tom said: - -"Get away from here!" - -"Go away yourself!" - -"I won't." - -"I won't either." - -So they stood, each with a foot placed at an angle as a brace, and -both shoving with might and main, and glowering at each other with -hate. But neither could get an advantage. 
After struggling till both -were hot and flushed, each relaxed his strain with watchful caution, -and Tom said: - -"You're a coward and a pup. I'll tell my big brother on you, and he -can thrash you with his little finger, and I'll make him do it, too." - -"What do I care for your big brother? I've got a brother that's bigger -than he is--and what's more, he can throw him over that fence, too." -[Both brothers were imaginary.] - -"That's a lie." - -"YOUR saying so don't make it so." - -Tom drew a line in the dust with his big toe, and said: - -"I dare you to step over that, and I'll lick you till you can't stand -up. Anybody that'll take a dare will steal sheep." - -The new boy stepped over promptly, and said: - -"Now you said you'd do it, now let's see you do it." - -"Don't you crowd me now; you better look out." - -"Well, you SAID you'd do it--why don't you do it?" - -"By jingo! for two cents I WILL do it." - -The new boy took two broad coppers out of his pocket and held them out -with derision. Tom struck them to the ground. In an instant both boys -were rolling and tumbling in the dirt, gripped together like cats; and -for the space of a minute they tugged and tore at each other's hair and -clothes, punched and scratched each other's nose, and covered -themselves with dust and glory. Presently the confusion took form, and -through the fog of battle Tom appeared, seated astride the new boy, and -pounding him with his fists. "Holler 'nuff!" said he. - -The boy only struggled to free himself. He was crying--mainly from rage. - -"Holler 'nuff!"--and the pounding went on. - -At last the stranger got out a smothered "'Nuff!" and Tom let him up -and said: - -"Now that'll learn you. Better look out who you're fooling with next -time." - -The new boy went off brushing the dust from his clothes, sobbing, -snuffling, and occasionally looking back and shaking his head and -threatening what he would do to Tom the "next time he caught him out." 
-To which Tom responded with jeers, and started off in high feather, and -as soon as his back was turned the new boy snatched up a stone, threw -it and hit him between the shoulders and then turned tail and ran like -an antelope. Tom chased the traitor home, and thus found out where he -lived. He then held a position at the gate for some time, daring the -enemy to come outside, but the enemy only made faces at him through the -window and declined. At last the enemy's mother appeared, and called -Tom a bad, vicious, vulgar child, and ordered him away. So he went -away; but he said he "'lowed" to "lay" for that boy. - -He got home pretty late that night, and when he climbed cautiously in -at the window, he uncovered an ambuscade, in the person of his aunt; -and when she saw the state his clothes were in her resolution to turn -his Saturday holiday into captivity at hard labor became adamantine in -its firmness. diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy deleted file mode 100644 index 9c56d985888e48a9967e187523f0e3326330aeca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9871 zcmW++d3+ni*`6u8u)A7~rM0|~WyP6QqBu?@lY=8Ar;;dGG#9aRhm?^tk~Wrh#qP>N zQ1~GlQvw7CE&Q640B$iMX|9tMwyz{)z z`#jI+Pu3f296Mjj@jT5o=rT5F=II7AU*t{??Jsd!b@-rZ*Idf;rf1p~tuvR_s(I#d zarWGEY?mu5xy7wKzoT}^s4@KYtwyn^>W(3dL`{kZP=7vyJ{wy zjghcqlQS7jS(#e zmZs@)nxac-T2WT6?(3&^fqJk`mLKGnS97>a9iFqDJZ#9c;8&(gv$j0|KV{`|gW9=V z&2e~s9<|~5wxf7;o7TC*DZjAF9g|x*cmzDO0)Jb#690QRJk^6QV6)f`XYGbd>m~Wj zDT<_YJpY-0H!a>nduzD?y4AHM2i^#*q+E4y}r zW^0~e*`7RQF@yiG@+MTj#>&ZLiQ{Ec7|q$0tZ6aN_|*UD)?A4>Ea$-*UC+#ouTk+z z4(Dw@o>`$8I$-g7f-);_bS-vx!G;}WdD*s#F7<5OhPQ;PwrhHv8gr3@d!x+BmEhZ) zu~zC)l+;Dj?0lZOHK>8>r7yyZ617|jYFMj|pSiJ2g6gGzyoQyb%9K^$xfK4toOigT z;2mMN%m$%u@x7GO4dc~fo;YM&BG`HPwbP&$u4g-qWYR0aX3z@HrY>tFVEv}!L7g31 zu(7mcktmzOUc|f?epI 
z^F!FaQZ${in}CXx>?)_GN9$&}OrgwB59*d`mc_X#0j%&$iPmkbNU%fkuEi{uvul=@ zP@s0S${Y`GoewXsmf>tl0QxM4e|cW=oCZE+W3|kg%PbEN+Ga;}X>V#xe|jy=C`t_a(ugU_DjSgFLL2mK z4x#7<*g=;|i|iw{ZhEEe{k?$s1cj2;ZD@375#40hhVjC+D1G9xa-No19_mypmjN#& z0JmG^IgjPau;RR3&q!0s(17qedyuEIp$cVLyOw?}q&T)+nP(Iwi@L*4$rIEHpe=q+ z@GQ0F(Hg!EPoG)op?%QYP`$ieY3>&+jyL&dg!_Bfu3mR`uh^Y_Dc}N@D(n)(n#aQc zN;oKn-x2Gmk(qxQEJkrXpei^|)7kmw0Ms& zo`^9CI-5I#W{xM5_&Mmp8f>z}2Jzjg*92&}9f*k>Z=MOD9^}cgnRn^-8v92SXn~i@ zJqjN4AED4l0Fn!NUbG|zs9op3TJTE7T$P#m1io|%EtuTN@p2EhWS)LbhRK1t>}){`}ZwY8IH<7+mLvV9nGBvc;u>S2GT(0A)oySKzL=B-0TES{&!n+}OV)XfJFT8tzeP zj(IE~WN79L{2R&TTWWiOi)}Y6RoAv+P-LEO@(}ajWUSEzsD>XfZeLbp!1-1W z1tFRB_$ZJEN@c^b;X{&J zvhlZ#4?~-hcCwbNy(;vVMt^MFGm}xbY!*u%@QCRednG8H`6_I{AqwP@hMbNLYRo!2so{4J6pevxa zjML`=1_0B*e{K&=S^4<_FLm|a;U^s~zr)8k#R+k3@!zN242`Ep@ zU9b#Kka}olc$>PNRgy>VLS3X?7MPcs5isG2%a7h}EL2@US#xq201OaR*uXf_@HVN; zJ+Xn)5)8HBJENH{7$XCwg&%_l_8Mv9GQ26|0*~ZM_{!m%Wg^hhaP9DY>GcxyB)%5$ z=6b;_$B)m?b07`}x;2@E9jADog<@HA1}vb#o4@WN`!_A>K2;1SbmO z0DzM$jL&_sNELtrt5!L~f5NW|)J)BldQDe6Akj58c}_8ZB)GNrev9&}z9~X;*aOhW zfKdkB2(tj+EWyM|U7_}ZfDv~b4G1}AT)+Et6^4bwENtgp1)DEbjJZs?IW}5(G>V~luD6joYPJOOrWTDF?bA3?D;v(A1 zX_s*q(R{LjwvWE2!c0?0$%if2GWlFaj%F#S(sZ>eea~DbR2r$XLNGYS`TZ+A3zv{P zdbN`@wFkKOVg@pkNe~@NuVMS&d zFG+ct!Mg(h=cyKwi60X597F0bK*w? z*Y5r{d0%c@FzY99H0ck?Z>5k&E4J%t2}vD*Ee`LNU}1keAnsS0E*z+@iO_? 
zPgckQ(98oo5|O8HknDh_1f8Bvs9UDfzR{vk=f+lK8NZlGW2^v0uxz0G^E6jBXGKB3 z+1WDZtwaUT2q7C?^*=YB~K`+zEH`ALC*9o3L8Qt?T;;Zi3%lgQokTi`w{nt5xYC8 zQeJkA`GS>y=BRY`>VAAVFmt-aXkL;@bxDft$e*KZC5mcGUTu*;93MU^*p;zqL;I7w z`&w~m_@C)g^VM|4xG{Yj%987)X(f<$N|rew1IW{o>Vl{oy*6rtI4M`@IjNr+qW-+3 z=IpXPS~={S@3J~H^@Uy%TC~em>TDl=Lv^cVGxJplC^ug=NjiYCJD;5bLLRs@nPlGZ zEy#^8CKdVaM+8fO(=fuS2U{m3f)@)<(C|~ zB?Ow+wQD3DBnyM^er>uOs)B4AWcW#(3xPc|`Jk5aBL&9-Ak6G~r{M>&=;@0!{3A8b zoPl^2h`}YtH1cIAnm4Ou5JkC)8g+p@%b-5_zrrAbJYK1iPF1{cWO~4JK*^6@9R-_~ z2kV|ABk=g2Cq=4ksXTQR0>@PEz=brY4aIZ7zWT3K0nNo zF!L*wkyYbQ#@o~s&6-+8Q9(%M#?BMm(&6heN22gMz$o>|p4Mg~50ccb0Ev1Gx*U!a zh&Tt4Lyyu+?r9-MTLqp!FEchDF><*gfp<0@1F*`z)COcla;iZ4nhOvmzxEju1FP(= zO7gh|iM6IT2$kB@VIN+sWqc|?Gl$FMWQBawM{ZE#uV@cyLch&kKrZ=9Yvm+?nIa<( z$;JfzCP@NOY77V!s_j*g<_hvEy&d+1oc1_G3e&tB_@b||# zYcRc6>a#22`C*7{sx^SdcQnDG+DHBnG^U$OlB;xRX7+?sr45oHLLb*=l6`aWGiXq& z2iXY~)a^jqOU4xPh>xwu?`H;AC^^%~)hh5Z%QIP-0%C*75Yo<-wUn%#XxXD=7|Nukhl}PQrwyLoMjrJ=hUIaM z+U1b_fw9~1ivgy%aDMA#kV5SI(J1?sw#64Rq3&RVz6mO1s&=m*me${!>C@Lr5w#V+ zHMR~Y4CYrO^@lJ-BSn@oHBXNJCy)cDV2_PNNwkC1QxUqAK`*()_a z5bCVyAzM4xMs~FZz^$ZQjY{~)P-^EkEz=smVCTy!tRRo8WPy(i`o)Lk3}RX~C}gbR zze=pY$qA?u7Y8MNA!EM>sd+0203wQ9n;jyT>jBXExzc;bBnY}lgBV-2oF9a9^|?1T z9Xh~_YsmRBESYA>J=)ZRdH}e@wu0M`l6)C+iQkug34by^zk29W+bW1VB-1mrIX*I75nN>$_=>|5$c}cf@bV7% z5b2l2sYCb9o{Id}B_I|7uMJF~6gk<{jv9D6*{G@Tf`R78_^9zX>Jpz8lF8227HNQU z{f9xpsc8%RQWu1qZb-?1|Muc{cRwPq3O*$MI`tNNK`58We}G)AvC~qgp` zE>BMnMl$P->1>&}hBS#V*hrrwWX;2XW(g0lW^ zA3jA~gsPnrWtgDXbdV2%D68lF0iN_h;4HaV}dus{Sp|S9<5{aVF|h(>uooK0AMg} z%Qa4p`BWHyp0>_E4l36p2YqBSVLKyx{rLKj!g8QM$PJSA)kOTIRCMgjlc<#GnEFD1 zyiCaR6UhyF0Ei#n{)!&0InZ%EN=BbIf!q?%u9`5PLEHmrg2@~hb6_XQ!O%-^6H!7{GO?3%NPwquNCM|hZVaP@_<0m=0boF}VrKdOlFN~Q zYLB&y-eP>OeP86sNs?Qu;GZ_5+QTi{(GKzhi7XAK-^BBX<3|a7wh~waffm>Hb&fru zLiU{_0mYb-IiW(XpEAzV{-uklh(92dOzldwd3apOl*rrC;Rlhkvnvc%1X9WZUKjun zCUfx=3?$a&*?81nfom1lg&_i3%rVK&D8C%Puie$EmQ85F zcho?MWwVW+ZN(c->jPe0!@&u;*_u-{|AueYE+!-AqdeYB9{Ha{(T}LXRp(5syGQ6A`gkVr-fRLzZj=$FSetkeuDx+nP#C+ 
za0s|o(asO+b0T=FVDST!RY+O9&Z7W@?t5@Asv2L0AkVejp0WDy-$00huT*U7naRlP zejZ|r<5XDgke4*E+xRzmJwT!g9BgjDGsjj-H0x+PB9S+JA;-*SZA+YqHvw%x1Dget z{K+SnPNYjBtz`655OTLhi6y0I6ewlT^jLXhkpwE6m@zU7#Db?C4o8SAf&C%LiR9l3 zxuhlYJVYf$@_>K%IaEu%O$LJ_mkYM5ebIqGlL|1D``X67VH}UYBbYq$lc@0t_Tz5$ zSqO5n0tk=KqHrycZ#v?A)TY1zVBlA+JTG-C1lr(O!T1*rze~3ID_L!VBnMH_cmOH! zRUsEL<+@2eZY9_2u2P~|O5`kA8M*JNI-at0~z1Eb-f*4>Yp+rJHc`lgEppI5)3;p`h2||L( zA8YR`XcM`r1BXS@(}l!fdk4`BEf^8Q(o$xLyR=K&u_C0bv$h7HX%1MkM#SsYg8 z460yWC|(=x0~_Tv4Dma&zk$`h)hpAH9sY^`HIgvL;3T6HQpUL{m6mH2vc?By0CGlZ zcHf|H>P){fE^HqmC{ON?@gd}COFAfA6`Gy`G0o23cU~QAdW6H|g~K1tLUH0pC^#Va z6J9K42-?(%TS!a}?lKO+n|`AqUQvQ^RA_!p{4(z0_X~NJzC|pMt7TM;x2manGAXt~ zl2(VSC2?b*kHPJm|79pI6fC`6J2gCdv36rXb(!mCo{$$KORxFKAE^YFGTJw(ROqtg z1Q`v8w$V3!8@hD$y0v|292Jj=9Scsh2hN%Ijp1&=TBN5a%J0;)zo{W8bY|7rZT+Ov zCoNJo^ejB-+>23$J=*z*uXAHSEjY}SZ&x!km0>4MY)#!QFeh^n%ILpr6>pyyMyc*A zcFxfskPjyz;@TZBIt2h=sSUBCVQ8xPp##^Wo~hrLhR=z-I)!{{pfYik;mN4Pu7qr} zX9ff+D05RAxnzQxwY?I401c*U;{2fvII8WR z*4-K?f}!Txdr>%Ux%kWtcovASJrV6Dh`Fr;e~nxjCofEqfbwIVN*LzIc%(&cmmw!E zlKm>8+N?PKE>g7r%4DH#xKg29BVw!nMKG#mMy`#ItqH-n1;U1b46#-3L~=xzK#`H_ zefqu8p6$4{A&|4dt?P+E08~h=8lf%P*X_fH1hW|XWxIY`t9&Bzac2k=G~|WG9=U;c zM&6K7fjp~4X0&TpwZz`X5wuiS{0zecfm10>ElE8Wh~q&}wf*7lE90C0@O=98;ajyRoiKC+x_gUQD0RJ&M~;D5DW=*DSbkWrPYSMK~si3DV6 zm0cI>7!115l{b!%`2?=H>SW_2a&;%-dYc^odZ>(+#S`K^Al&Nt;Tu)>{X9|AKL`P2 zU(?A{MVvXj=DiQqzEoxz?h2Mb)1I+G6!Y2^i7si-?J$~cBfXKF9t-5)2EqOCTDcpa z*Yi&VSbUx0889$yFgQ88N>(ND_lA$9-V&-d`LWJ-LwsJ=?oXl$rZ9zG0A@Ls1U92V?p;7Lcu1xJMa*DRDf576QoN+MdKZ{Xl0wQO55_b@O7-ebv<#n2c4HNt*rdMVrxA^Z57Ycxg znj5=ci@z`a$H=ccTXpShmQ-2<#Wjj}Tl`0GubP8vr!O_z&%VGHYB2-<5kfvD2JyGZ zr7~_y>Ej8i_DpQ#`0z1s{+lKCXBbHTAn~mzh9B0B_;C#7$Y5Mw8yD;v`!D2UWO`7@ zmE!Y&mfR|-_C%zFx*a~I5ZOAJOm0b?hu-`#W&p`4RGkMpjNDuTCbP zXrwz3Ikm-jXV&c4ZPUoM98(TnH9m35bGj7MaakS-vcqYIE=-E%xa!Rw$9Q&olHKSOfH?H zdhkIS4H@EOaC5J#^2OShQvlzd2G`Q$x*q+oEYw^L6sHmy4tD#r*Ceu#>N6%8pM)!* z^>Qpv9w&N98hwnood`ayL>A9L^M*oVQG2`{G5y6bNN(fl)I*iK^xtWzV*=dNP7v&} zwo1WcsX<8WW^~>k2g1KGu9vhBcn!}SB( 
aes.BlockSize { + ct, _ = swapLastTwoBlocks(ct, aes.BlockSize) + } + mode = cipher.NewCBCDecrypter(block, iv) + message := make([]byte, len(ct)) + mode.CryptBlocks(message, ct) + return message[:len(ct)], nil + } + + // Cipher Text Stealing (CTS) using CBC interface. Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing + // Get ciphertext of the 2nd to last (penultimate) block (cpb), the last block (clb) and the rest (crb) + crb, cpb, clb, _ := tailBlocks(ct, aes.BlockSize) + v := make([]byte, len(iv), len(iv)) + copy(v, iv) + var message []byte + if crb != nil { + //If there is more than just the last and the penultimate block we decrypt it and the last bloc of this becomes the iv for later + rb := make([]byte, len(crb)) + mode = cipher.NewCBCDecrypter(block, v) + v = crb[len(crb)-aes.BlockSize:] + mode.CryptBlocks(rb, crb) + message = append(message, rb...) + } + + // We need to modify the cipher text + // Decryt the 2nd to last (penultimate) block with a the original iv + pb := make([]byte, aes.BlockSize) + mode = cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(pb, cpb) + // number of byte needed to pad + npb := aes.BlockSize - len(ct)%aes.BlockSize + //pad last block using the number of bytes needed from the tail of the plaintext 2nd to last (penultimate) block + clb = append(clb, pb[len(pb)-npb:]...) + + // Now decrypt the last block in the penultimate position (iv will be from the crb, if the is no crb it's zeros) + // iv for the penultimate block decrypted in the last position becomes the modified last block + lb := make([]byte, aes.BlockSize) + mode = cipher.NewCBCDecrypter(block, v) + v = clb + mode.CryptBlocks(lb, clb) + message = append(message, lb...) + + // Now decrypt the penultimate block in the last position (iv will be from the modified last block) + mode = cipher.NewCBCDecrypter(block, v) + mode.CryptBlocks(cpb, cpb) + message = append(message, cpb...) 
+ + // Truncate to the size of the original cipher text + return message[:len(ct)], nil +} + +func tailBlocks(b []byte, c int) ([]byte, []byte, []byte, error) { + if len(b) <= c { + return []byte{}, []byte{}, []byte{}, errors.New("bytes slice is not larger than one block so cannot tail") + } + // Get size of last block + var lbs int + if l := len(b) % aes.BlockSize; l == 0 { + lbs = aes.BlockSize + } else { + lbs = l + } + // Get last block + lb := b[len(b)-lbs:] + // Get 2nd to last (penultimate) block + pb := b[len(b)-lbs-c : len(b)-lbs] + if len(b) > 2*c { + rb := b[:len(b)-lbs-c] + return rb, pb, lb, nil + } + return nil, pb, lb, nil +} + +func swapLastTwoBlocks(b []byte, c int) ([]byte, error) { + rb, pb, lb, err := tailBlocks(b, c) + if err != nil { + return nil, err + } + var out []byte + if rb != nil { + out = append(out, rb...) + } + out = append(out, lb...) + out = append(out, pb...) + return out, nil +} + +// zeroPad pads bytes with zeros to nearest multiple of message size m. +func zeroPad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("data not valid to pad: Zero size") + } + if l := len(b) % m; l != 0 { + n := m - l + z := make([]byte, n) + b = append(b, z...) 
+ } + return b, nil +} diff --git a/vendor/github.com/jcmturner/aescts/v2/go.mod b/vendor/github.com/jcmturner/aescts/v2/go.mod new file mode 100644 index 00000000..034c3cec --- /dev/null +++ b/vendor/github.com/jcmturner/aescts/v2/go.mod @@ -0,0 +1,5 @@ +module github.com/jcmturner/aescts/v2 + +go 1.13 + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/jcmturner/aescts/v2/go.sum b/vendor/github.com/jcmturner/aescts/v2/go.sum new file mode 100644 index 00000000..e863f517 --- /dev/null +++ b/vendor/github.com/jcmturner/aescts/v2/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jcmturner/dnsutils/v2/LICENSE b/vendor/github.com/jcmturner/dnsutils/v2/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/jcmturner/dnsutils/v2/go.mod b/vendor/github.com/jcmturner/dnsutils/v2/go.mod new file mode 100644 index 00000000..f75ac6d2 --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/go.mod @@ -0,0 +1,5 @@ +module github.com/jcmturner/dnsutils/v2 + +go 1.13 + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/jcmturner/dnsutils/v2/go.sum b/vendor/github.com/jcmturner/dnsutils/v2/go.sum new file mode 100644 index 00000000..e863f517 --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jcmturner/dnsutils/v2/srv.go b/vendor/github.com/jcmturner/dnsutils/v2/srv.go new file mode 100644 index 00000000..15ea912d --- /dev/null +++ b/vendor/github.com/jcmturner/dnsutils/v2/srv.go @@ -0,0 +1,95 @@ +package dnsutils + +import ( + "math/rand" + "net" + "sort" +) + +// OrderedSRV returns a count of the results and a map keyed on the order they should be used. +// This based on the records' priority and randomised selection based on their relative weighting. 
+// The function's inputs are the same as those for net.LookupSRV +// To use in the correct order: +// +// count, orderedSRV, err := OrderedSRV(service, proto, name) +// i := 1 +// for i <= count { +// srv := orderedSRV[i] +// // Do something such as dial this SRV. If fails move on the the next or break if it succeeds. +// i += 1 +// } +func OrderedSRV(service, proto, name string) (int, map[int]*net.SRV, error) { + _, addrs, err := net.LookupSRV(service, proto, name) + if err != nil { + return 0, make(map[int]*net.SRV), err + } + index, osrv := orderSRV(addrs) + return index, osrv, nil +} + +func orderSRV(addrs []*net.SRV) (int, map[int]*net.SRV) { + // Initialise the ordered map + var o int + osrv := make(map[int]*net.SRV) + + prioMap := make(map[int][]*net.SRV, 0) + for _, srv := range addrs { + prioMap[int(srv.Priority)] = append(prioMap[int(srv.Priority)], srv) + } + + priorities := make([]int, 0) + for p := range prioMap { + priorities = append(priorities, p) + } + + var count int + sort.Ints(priorities) + for _, p := range priorities { + tos := weightedOrder(prioMap[p]) + for i, s := range tos { + count += 1 + osrv[o+i] = s + } + o += len(tos) + } + return count, osrv +} + +func weightedOrder(srvs []*net.SRV) map[int]*net.SRV { + // Get the total weight + var tw int + for _, s := range srvs { + tw += int(s.Weight) + } + + // Initialise the ordered map + o := 1 + osrv := make(map[int]*net.SRV) + + // Whilst there are still entries to be ordered + l := len(srvs) + for l > 0 { + i := rand.Intn(l) + s := srvs[i] + var rw int + if tw > 0 { + // Greater the weight the more likely this will be zero or less + rw = rand.Intn(tw) - int(s.Weight) + } + if rw <= 0 { + // Put entry in position + osrv[o] = s + if len(srvs) > 1 { + // Remove the entry from the source slice by swapping with the last entry and truncating + srvs[len(srvs)-1], srvs[i] = srvs[i], srvs[len(srvs)-1] + srvs = srvs[:len(srvs)-1] + l = len(srvs) + } else { + l = 0 + } + o += 1 + tw = tw - 
int(s.Weight) + } + } + return osrv +} diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md new file mode 100644 index 00000000..66a2a8cc --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md @@ -0,0 +1,5 @@ +This is a temporary repository that will be removed when the issues below are fixed in the core golang code. + +## Issues +* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832) +* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834) \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go new file mode 100644 index 00000000..f1bb7671 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go @@ -0,0 +1,1003 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. +// +// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. 
+ +import ( + "errors" + "fmt" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string +} + +func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string +} + +func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } + +// We start by dealing with each of the primitive types in turn. + +// BOOLEAN + +func parseBool(bytes []byte) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean"} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean"} + } + + return +} + +// INTEGER + +// checkInteger returns nil if the given bytes are a valid DER-encoded +// INTEGER and an error otherwise. +func checkInteger(bytes []byte) error { + if len(bytes) == 0 { + return StructuralError{"empty integer"} + } + if len(bytes) == 1 { + return nil + } + if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) { + return StructuralError{"integer not minimally-encoded"} + } + return nil +} + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. +func parseInt64(bytes []byte) (ret int64, err error) { + err = checkInteger(bytes) + if err != nil { + return + } + if len(bytes) > 8 { + // We'll overflow an int64 in this case. 
+ err = StructuralError{"integer too large"} + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +// parseInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseInt32(bytes []byte) (int32, error) { + if err := checkInteger(bytes); err != nil { + return 0, err + } + ret64, err := parseInt64(bytes) + if err != nil { + return 0, err + } + if ret64 != int64(int32(ret64)) { + return 0, StructuralError{"integer too large"} + } + return int32(ret64), nil +} + +var bigOne = big.NewInt(1) + +// parseBigInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseBigInt(bytes []byte) (*big.Int, error) { + if err := checkInteger(bytes); err != nil { + return nil, err + } + ret := new(big.Int) + if len(bytes) > 0 && bytes[0]&0x80 == 0x80 { + // This is a negative number. + notBytes := make([]byte, len(bytes)) + for i := range notBytes { + notBytes[i] = ^bytes[i] + } + ret.SetBytes(notBytes) + ret.Add(ret, bigOne) + ret.Neg(ret) + return ret, nil + } + ret.SetBytes(bytes) + return ret, nil +} + +// BIT STRING + +// BitString is the structure to use when you want an ASN.1 BIT STRING type. A +// bit string is padded up to the nearest byte in memory and the number of +// valid bits is recorded. Padding bits will be zero. +type BitString struct { + Bytes []byte // bits packed into bytes. + BitLength int // length in bits. +} + +// At returns the bit at the given index. If the index is out of range it +// returns false. +func (b BitString) At(i int) int { + if i < 0 || i >= b.BitLength { + return 0 + } + x := i / 8 + y := 7 - uint(i%8) + return int(b.Bytes[x]>>y) & 1 +} + +// RightAlign returns a slice where the padding bits are at the beginning. 
The +// slice may share memory with the BitString. +func (b BitString) RightAlign() []byte { + shift := uint(8 - (b.BitLength % 8)) + if shift == 8 || len(b.Bytes) == 0 { + return b.Bytes + } + + a := make([]byte, len(b.Bytes)) + a[0] = b.Bytes[0] >> shift + for i := 1; i < len(b.Bytes); i++ { + a[i] = b.Bytes[i-1] << (8 - shift) + a[i] |= b.Bytes[i] >> shift + } + + return a +} + +// parseBitString parses an ASN.1 bit string from the given byte slice and returns it. +func parseBitString(bytes []byte) (ret BitString, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length BIT STRING"} + return + } + paddingBits := int(bytes[0]) + if paddingBits > 7 || + len(bytes) == 1 && paddingBits > 0 || + bytes[len(bytes)-1]&((1< 0 { + s += "." + } + s += strconv.Itoa(v) + } + + return s +} + +// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and +// returns it. An object identifier is a sequence of variable length integers +// that are assigned in a hierarchy. +func parseObjectIdentifier(bytes []byte) (s []int, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length OBJECT IDENTIFIER"} + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + v, offset, err := parseBase128Int(bytes, 0) + if err != nil { + return + } + if v < 80 { + s[0] = v / 40 + s[1] = v % 40 + } else { + s[0] = 2 + s[1] = v - 80 + } + + i := 2 + for ; offset < len(bytes); i++ { + v, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// ENUMERATED + +// An Enumerated is represented as a plain int. 
+type Enumerated int + +// FLAG + +// A Flag accepts any data and is set to true if present. +type Flag bool + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { + offset = initOffset + for shifted := 0; offset < len(bytes); shifted++ { + if shifted == 4 { + err = StructuralError{"base 128 integer too large"} + return + } + ret <<= 7 + b := bytes[offset] + ret |= int(b & 0x7f) + offset++ + if b&0x80 == 0 { + return + } + } + err = SyntaxError{"truncated base 128 integer"} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + + formatStr := "0601021504Z0700" + ret, err = time.Parse(formatStr, s) + if err != nil { + formatStr = "060102150405Z0700" + ret, err = time.Parse(formatStr, s) + } + if err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + return + } + + if ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. 
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// PrintableString + +// parsePrintableString parses a ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b) { + err = SyntaxError{"PrintableString contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' +} + +// IA5String + +// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. 
+func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag"} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. 
+ ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)"} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large"} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. + err = StructuralError{"superfluous leading zeros in length"} + return + } + } + // Short lengths must be encoded in short form. + if ret.length < 0x80 { + err = StructuralError{"non-minimal length"} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { + expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice"} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. 
+ t.tag = TagUTCTime + } + + if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag { + err = StructuralError{"sequence tag mismatch"} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence"} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength returns true iff offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated"} + } + return + } + + // Deal with raw values. 
+ if fieldType == rawValueType { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]} + offset += t.length + v.Set(reflect.ValueOf(result)) + return + } + + // Deal with the ANY type. + if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes) + case TagIA5String: + result, err = parseIA5String(innerBytes) + // jtasn1 addition of following case + case TagGeneralString: + result, err = parseIA5String(innerBytes) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes) + case TagBitString: + result, err = parseBitString(innerBytes) + case TagOID: + result, err = parseObjectIdentifier(innerBytes) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + default: + // If we don't know how to handle the type, we just leave Value as nil. 
+ } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} + return + } + + t, offset, err := parseTagAndLength(bytes, offset) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child"} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag"} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match"} + } + return + } + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. 
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + } + + // We have unwrapped any explicit tagging at this point. + if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. 
+ switch fieldType { + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerOffset, err = parseField(val.Field(i), innerBytes, 
innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. + return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes) + case TagIA5String: + v, err = parseIA5String(innerBytes) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String()} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. 
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString or IA5String can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. +// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. 
+// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that a APPLICATION tag is used +// default:x sets the default value for optional integer fields +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go new file mode 100644 index 00000000..7a9da49f --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go @@ -0,0 +1,173 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. +const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. +// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) +// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. 
+type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. +func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + // jtasn1 case below added + case part == "generalstring": + ret.stringType = TagGeneralString + case part == "printable": + ret.stringType = TagPrintableString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + } + } + return 
+} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. +func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) { + switch t { + case objectIdentifierType: + return TagOID, false, true + case bitStringType: + return TagBitString, false, true + case timeType: + return TagUTCTime, false, true + case enumeratedType: + return TagEnum, false, true + case bigIntType: + return TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return TagInteger, false, true + case reflect.Struct: + return TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return TagSet, true, true + } + return TagSequence, true, true + case reflect.String: + return TagPrintableString, false, true + } + return 0, false, false +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go new file mode 100644 index 00000000..f52eee9d --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go @@ -0,0 +1,659 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +// A forkableWriter is an in-memory buffer that can be +// 'forked' to create new forkableWriters that bracket the +// original. After +// pre, post := w.fork() +// the overall sequence of bytes represented is logically w+pre+post. 
+type forkableWriter struct { + *bytes.Buffer + pre, post *forkableWriter +} + +func newForkableWriter() *forkableWriter { + return &forkableWriter{new(bytes.Buffer), nil, nil} +} + +func (f *forkableWriter) fork() (pre, post *forkableWriter) { + if f.pre != nil || f.post != nil { + panic("have already forked") + } + f.pre = newForkableWriter() + f.post = newForkableWriter() + return f.pre, f.post +} + +func (f *forkableWriter) Len() (l int) { + l += f.Buffer.Len() + if f.pre != nil { + l += f.pre.Len() + } + if f.post != nil { + l += f.post.Len() + } + return +} + +func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { + n, err = out.Write(f.Bytes()) + if err != nil { + return + } + + var nn int + + if f.pre != nil { + nn, err = f.pre.writeTo(out) + n += nn + if err != nil { + return + } + } + + if f.post != nil { + nn, err = f.post.writeTo(out) + n += nn + } + return +} + +func marshalBase128Int(out *forkableWriter, n int64) (err error) { + if n == 0 { + err = out.WriteByte(0) + return + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + err = out.WriteByte(o) + if err != nil { + return + } + } + + return nil +} + +func marshalInt64(out *forkableWriter, i int64) (err error) { + n := int64Length(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. 
+ nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + err = out.WriteByte(0xff) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } else if n.Sign() == 0 { + // Zero is written as a single 0 zero rather than no bytes. + err = out.WriteByte(0x00) + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with 0x00 in order to stop it + // looking like a negative number. + err = out.WriteByte(0) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } + return +} + +func marshalLength(out *forkableWriter, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { + b := uint8(t.class) << 6 + if t.isCompound { + b |= 0x20 + } + if t.tag >= 31 { + b |= 0x1f + err = out.WriteByte(b) + if err != nil { + return + } + err = marshalBase128Int(out, int64(t.tag)) + if err != nil { + return + } + } else { + b |= uint8(t.tag) + err = out.WriteByte(b) + if err != nil { + return + } + } + + if t.length >= 128 { + l := lengthLength(t.length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLength(out, t.length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(t.length)) + if err != nil { + return + } + } + + return nil +} + +func marshalBitString(out *forkableWriter, b BitString) (err error) { + paddingBits := byte((8 - b.BitLength%8) % 8) + err = out.WriteByte(paddingBits) + if err != nil { + return + } + _, err = out.Write(b.Bytes) + return +} + +func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) { + if 
len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return StructuralError{"invalid object identifier"} + } + + err = marshalBase128Int(out, int64(oid[0]*40+oid[1])) + if err != nil { + return + } + for i := 2; i < len(oid); i++ { + err = marshalBase128Int(out, int64(oid[i])) + if err != nil { + return + } + } + + return +} + +func marshalPrintableString(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if !isPrintable(c) { + return StructuralError{"PrintableString contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalIA5String(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if c > 127 { + return StructuralError{"IA5String contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalUTF8String(out *forkableWriter, s string) (err error) { + _, err = out.Write([]byte(s)) + return +} + +func marshalTwoDigits(out *forkableWriter, v int) (err error) { + err = out.WriteByte(byte('0' + (v/10)%10)) + if err != nil { + return + } + return out.WriteByte(byte('0' + v%10)) +} + +func marshalFourDigits(out *forkableWriter, v int) (err error) { + var bytes [4]byte + for i := range bytes { + bytes[3-i] = '0' + byte(v%10) + v /= 10 + } + _, err = out.Write(bytes[:]) + return +} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year >= 2050 +} + +func marshalUTCTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + err = marshalTwoDigits(out, year-1900) + case 2000 <= year && year < 2050: + err = marshalTwoDigits(out, year-2000) + default: + return StructuralError{"cannot represent time as UTCTime"} + } + if err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + if year < 0 || year > 9999 { + return 
StructuralError{"cannot represent time as GeneralizedTime"} + } + if err = marshalFourDigits(out, year); err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) { + _, month, day := t.Date() + + err = marshalTwoDigits(out, int(month)) + if err != nil { + return + } + + err = marshalTwoDigits(out, day) + if err != nil { + return + } + + hour, min, sec := t.Clock() + + err = marshalTwoDigits(out, hour) + if err != nil { + return + } + + err = marshalTwoDigits(out, min) + if err != nil { + return + } + + err = marshalTwoDigits(out, sec) + if err != nil { + return + } + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + err = out.WriteByte('Z') + return + case offset > 0: + err = out.WriteByte('+') + case offset < 0: + err = out.WriteByte('-') + } + + if err != nil { + return + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + err = marshalTwoDigits(out, offsetMinutes/60) + if err != nil { + return + } + + err = marshalTwoDigits(out, offsetMinutes%60) + return +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0) + if err != nil { + return in + } + return in[offset:] +} + +func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) { + switch value.Type() { + case flagType: + return nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return marshalGeneralizedTime(out, t) + } else { + return marshalUTCTime(out, t) + } + case bitStringType: + return marshalBitString(out, value.Interface().(BitString)) + case objectIdentifierType: + return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier)) + case bigIntType: + return marshalBigInt(out, value.Interface().(*big.Int)) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return out.WriteByte(255) + 
} else { + return out.WriteByte(0) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return marshalInt64(out, v.Int()) + case reflect.Struct: + t := v.Type() + + startingField := 0 + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. + if t.NumField() > 0 && t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := make([]byte, s.Len()) + for i := 0; i < s.Len(); i++ { + bytes[i] = uint8(s.Index(i).Uint()) + } + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + _, err = out.Write(stripTagAndLength(bytes)) + return + } else { + startingField = 1 + } + } + + for i := startingField; i < t.NumField(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1"))) + if err != nil { + return + } + } + return + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + bytes := make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err = out.Write(bytes) + return + } + + // jtasn1 Pass on the tags to the members but need to unset explicit switch and implicit value + //var fp fieldParameters + params.explicit = false + params.tag = nil + for i := 0; i < v.Len(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Index(i), params) + if err != nil { + return + } + } + return + case reflect.String: + switch params.stringType { + case TagIA5String: + return marshalIA5String(out, v.String()) + case TagPrintableString: + return marshalPrintableString(out, v.String()) + default: + return marshalUTF8String(out, v.String()) + } + } + + return StructuralError{"unknown Go type"} +} + +func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err 
error) { + if !v.IsValid() { + return fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return marshalField(out, v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behaviour, but it's what Go has traditionally done. + if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + _, err = out.Write(rv.FullBytes) + } else { + err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}) + if err != nil { + return + } + _, err = out.Write(rv.Bytes) + } + return + } + + tag, isCompound, ok := getUniversalType(v.Type()) + if !ok { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())} + return + } + class := ClassUniversal + + if params.timeType != 0 && tag != TagUTCTime { + return StructuralError{"explicit time type given to non-time member"} + } + + // jtasn1 updated to allow slices of strings + if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == 16 && v.Type().Elem().Kind() == reflect.String)) { + return StructuralError{"explicit string type given to non-string member"} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. 
We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r)) { + if !utf8.ValidString(v.String()) { + return errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return StructuralError{"non sequence tagged as set"} + } + tag = TagSet + } + + tags, body := out.fork() + + err = marshalBody(body, v, params) + if err != nil { + return + } + + bodyLen := body.Len() + + var explicitTag *forkableWriter + if params.explicit { + explicitTag, tags = tags.fork() + } + + if !params.explicit && params.tag != nil { + // implicit tag. + tag = *params.tag + class = ClassContextSpecific + } + + err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound}) + if err != nil { + return + } + + if params.explicit { + err = marshalTagAndLength(explicitTag, tagAndLength{ + class: ClassContextSpecific, + tag: *params.tag, + length: bodyLen + tags.Len(), + isCompound: true, + }) + } + + return err +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5 strings +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString strings. 
+// utf8: causes strings to be marshaled as ASN.1, UTF8 strings +func Marshal(val interface{}) ([]byte, error) { + var out bytes.Buffer + v := reflect.ValueOf(val) + f := newForkableWriter() + err := marshalField(f, v, fieldParameters{}) + if err != nil { + return nil, err + } + _, err = f.writeTo(&out) + return out.Bytes(), err +} diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000..75d41876 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,98 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. 
AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + return Key64(password, salt, int64(iter), int64(keyLen), h) +} + +// Key64 derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. Key64 uses +// int64 for the iteration count and key length to allow larger values. +// The key is derived based on the method described as PBKDF2 with the HMAC +// variant using the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := int64(prf.Size()) + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := int64(1); block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[int64(len(dk))-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := int64(2); n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/LICENSE b/vendor/github.com/jcmturner/gokrb5/v8/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go b/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go new file mode 100644 index 00000000..f27740b9 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go @@ -0,0 +1,86 @@ +// Package asn1tools provides tools for managing ASN1 marshaled data. +package asn1tools + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// MarshalLengthBytes returns the ASN1 encoded bytes for the length 'l' +// +// There are two forms: short (for lengths between 0 and 127), and long definite (for lengths between 0 and 2^1008 -1). +// +// Short form: One octet. Bit 8 has value "0" and bits 7-1 give the length. +// +// Long form: Two to 127 octets. Bit 8 of first octet has value "1" and bits 7-1 give the number of additional length octets. Second and following octets give the length, base 256, most significant digit first. +func MarshalLengthBytes(l int) []byte { + if l <= 127 { + return []byte{byte(l)} + } + var b []byte + p := 1 + for i := 1; i < 127; { + b = append([]byte{byte((l % (p * 256)) / p)}, b...) + p = p * 256 + l = l - l%p + if l <= 0 { + break + } + } + return append([]byte{byte(128 + len(b))}, b...) +} + +// GetLengthFromASN returns the length of a slice of ASN1 encoded bytes from the ASN1 length header it contains. 
+func GetLengthFromASN(b []byte) int { + if int(b[1]) <= 127 { + return int(b[1]) + } + // The bytes that indicate the length + lb := b[2 : 2+int(b[1])-128] + base := 1 + l := 0 + for i := len(lb) - 1; i >= 0; i-- { + l += int(lb[i]) * base + base = base * 256 + } + return l +} + +// GetNumberBytesInLengthHeader returns the number of bytes in the ASn1 header that indicate the length. +func GetNumberBytesInLengthHeader(b []byte) int { + if int(b[1]) <= 127 { + return 1 + } + // The bytes that indicate the length + return 1 + int(b[1]) - 128 +} + +// AddASNAppTag adds an ASN1 encoding application tag value to the raw bytes provided. +func AddASNAppTag(b []byte, tag int) []byte { + r := asn1.RawValue{ + Class: asn1.ClassApplication, + IsCompound: true, + Tag: tag, + Bytes: b, + } + ab, _ := asn1.Marshal(r) + return ab +} + +/* +// The Marshal method of golang's asn1 package does not enable you to define wrapping the output in an application tag. +// This method adds that wrapping tag. +func AddASNAppTag(b []byte, tag int) []byte { + // The ASN1 wrapping consists of 2 bytes: + // 1st byte -> Identifier Octet - Application Tag + // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. + // Application Tag: + //| Bit: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | + //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | + //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | + // Therefore the value of the byte is an integer = ( Application tag value + 96 ) + //b = append(MarshalLengthBytes(int(b[1])+2), b...) + b = append(MarshalLengthBytes(len(b)), b...) + b = append([]byte{byte(96 + tag)}, b...) 
+ return b +} +*/ diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go new file mode 100644 index 00000000..5becccc4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go @@ -0,0 +1,182 @@ +package client + +import ( + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// ASExchange performs an AS exchange for the client to retrieve a TGT. +func (cl *Client) ASExchange(realm string, ASReq messages.ASReq, referral int) (messages.ASRep, error) { + if ok, err := cl.IsConfigured(); !ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.ConfigError, "AS Exchange cannot be performed") + } + + // Set PAData if required + err := setPAData(cl, nil, &ASReq) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: issue with setting PAData on AS_REQ") + } + + b, err := ASReq.Marshal() + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ") + } + var ASRep messages.ASRep + + rb, err := cl.sendToKDC(b, realm) + if err != nil { + if e, ok := err.(messages.KRBError); ok { + switch e.ErrorCode { + case errorcode.KDC_ERR_PREAUTH_REQUIRED, errorcode.KDC_ERR_PREAUTH_FAILED: + // From now on assume this client will need to do this pre-auth and set the PAData + cl.settings.assumePreAuthentication = true + err = setPAData(cl, &e, &ASReq) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: failed setting AS_REQ PAData for pre-authentication required") + } + b, err := 
ASReq.Marshal() + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ with PAData") + } + rb, err = cl.sendToKDC(b, realm) + if err != nil { + if _, ok := err.(messages.KRBError); ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") + } + return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") + } + case errorcode.KDC_ERR_WRONG_REALM: + // Client referral https://tools.ietf.org/html/rfc6806.html#section-7 + if referral > 5 { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of client referrals exceeded") + } + referral++ + return cl.ASExchange(e.CRealm, ASReq, referral) + default: + return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") + } + } else { + return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") + } + } + err = ASRep.Unmarshal(rb) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed to process the AS_REP") + } + if ok, err := ASRep.Verify(cl.Config, cl.Credentials, ASReq); !ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: AS_REP is not valid or client password/keytab incorrect") + } + return ASRep, nil +} + +// setPAData adds pre-authentication data to the AS_REQ. 
+func setPAData(cl *Client, krberr *messages.KRBError, ASReq *messages.ASReq) error { + if !cl.settings.DisablePAFXFAST() { + pa := types.PAData{PADataType: patype.PA_REQ_ENC_PA_REP} + ASReq.PAData = append(ASReq.PAData, pa) + } + if cl.settings.AssumePreAuthentication() { + // Identify the etype to use to encrypt the PA Data + var et etype.EType + var err error + var key types.EncryptionKey + var kvno int + if krberr == nil { + // This is not in response to an error from the KDC. It is preemptive or renewal + // There is no KRB Error that tells us the etype to use + etn := cl.settings.preAuthEType // Use the etype that may have previously been negotiated + if etn == 0 { + etn = int32(cl.Config.LibDefaults.PreferredPreauthTypes[0]) // Resort to config + } + et, err = crypto.GetEtype(etn) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") + } + key, kvno, err = cl.Key(et, 0, nil) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") + } + } else { + // Get the etype to use from the PA data in the KRBError e-data + et, err = preAuthEType(krberr) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") + } + cl.settings.preAuthEType = et.GetETypeID() // Set the etype that has been defined for potential future use + key, kvno, err = cl.Key(et, 0, krberr) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") + } + } + // Generate the PA data + paTSb, err := types.GetPAEncTSEncAsnMarshalled() + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error creating PAEncTSEnc for Pre-Authentication") + } + paEncTS, err := crypto.GetEncryptedData(paTSb, key, keyusage.AS_REQ_PA_ENC_TIMESTAMP, kvno) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error encrypting pre-authentication 
timestamp") + } + pb, err := paEncTS.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling the PAEncTSEnc encrypted data") + } + pa := types.PAData{ + PADataType: patype.PA_ENC_TIMESTAMP, + PADataValue: pb, + } + // Look for and delete any exiting patype.PA_ENC_TIMESTAMP + for i, pa := range ASReq.PAData { + if pa.PADataType == patype.PA_ENC_TIMESTAMP { + ASReq.PAData[i] = ASReq.PAData[len(ASReq.PAData)-1] + ASReq.PAData = ASReq.PAData[:len(ASReq.PAData)-1] + } + } + ASReq.PAData = append(ASReq.PAData, pa) + } + return nil +} + +// preAuthEType establishes what encryption type to use for pre-authentication from the KRBError returned from the KDC. +func preAuthEType(krberr *messages.KRBError) (etype etype.EType, err error) { + //RFC 4120 5.2.7.5 covers the preference order of ETYPE-INFO2 and ETYPE-INFO. + var etypeID int32 + var pas types.PADataSequence + e := pas.Unmarshal(krberr.EData) + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling KRBError data") + return + } +Loop: + for _, pa := range pas { + switch pa.PADataType { + case patype.PA_ETYPE_INFO2: + info, e := pa.GetETypeInfo2() + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO2 data") + return + } + etypeID = info[0].EType + break Loop + case patype.PA_ETYPE_INFO: + info, e := pa.GetETypeInfo() + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO data") + return + } + etypeID = info[0].EType + } + } + etype, e = crypto.GetEtype(etypeID) + if e != nil { + err = krberror.Errorf(e, krberror.EncryptingError, "error creating etype") + return + } + return etype, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go new file mode 100644 index 00000000..e4571ce8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go @@ -0,0 +1,103 @@ 
+package client + +import ( + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// TGSREQGenerateAndExchange generates the TGS_REQ and performs a TGS exchange to retrieve a ticket to the specified SPN. +func (cl *Client) TGSREQGenerateAndExchange(spn types.PrincipalName, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, renewal bool) (tgsReq messages.TGSReq, tgsRep messages.TGSRep, err error) { + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, spn, renewal) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: failed to generate a new TGS_REQ") + } + return cl.TGSExchange(tgsReq, kdcRealm, tgsRep.Ticket, sessionKey, 0) +} + +// TGSExchange exchanges the provided TGS_REQ with the KDC to retrieve a TGS_REP. +// Referrals are automatically handled. +// The client's cache is updated with the ticket received. 
+func (cl *Client) TGSExchange(tgsReq messages.TGSReq, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, referral int) (messages.TGSReq, messages.TGSRep, error) { + var tgsRep messages.TGSRep + b, err := tgsReq.Marshal() + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to marshal TGS_REQ") + } + r, err := cl.sendToKDC(b, kdcRealm) + if err != nil { + if _, ok := err.(messages.KRBError); ok { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KDCError, "TGS Exchange Error: kerberos error response from KDC when requesting for %s", tgsReq.ReqBody.SName.PrincipalNameString()) + } + return tgsReq, tgsRep, krberror.Errorf(err, krberror.NetworkingError, "TGS Exchange Error: issue sending TGS_REQ to KDC") + } + err = tgsRep.Unmarshal(r) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") + } + err = tgsRep.DecryptEncPart(sessionKey) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") + } + if ok, err := tgsRep.Verify(cl.Config, tgsReq); !ok { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid") + } + + if tgsRep.Ticket.SName.NameString[0] == "krbtgt" && !tgsRep.Ticket.SName.Equal(tgsReq.ReqBody.SName) { + if referral > 5 { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: maximum number of referrals exceeded") + } + // Server referral https://tools.ietf.org/html/rfc6806.html#section-8 + // The TGS Rep contains a TGT for another domain as the service resides in that domain. 
+ cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) + realm := tgsRep.Ticket.SName.NameString[len(tgsRep.Ticket.SName.NameString)-1] + referral++ + if types.IsFlagSet(&tgsReq.ReqBody.KDCOptions, flags.EncTktInSkey) && len(tgsReq.ReqBody.AdditionalTickets) > 0 { + tgsReq, err = messages.NewUser2UserTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal, tgsReq.ReqBody.AdditionalTickets[0]) + if err != nil { + return tgsReq, tgsRep, err + } + } + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), realm, cl.Config, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, tgsReq.ReqBody.SName, tgsReq.Renewal) + if err != nil { + return tgsReq, tgsRep, err + } + return cl.TGSExchange(tgsReq, realm, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, referral) + } + cl.cache.addEntry( + tgsRep.Ticket, + tgsRep.DecryptedEncPart.AuthTime, + tgsRep.DecryptedEncPart.StartTime, + tgsRep.DecryptedEncPart.EndTime, + tgsRep.DecryptedEncPart.RenewTill, + tgsRep.DecryptedEncPart.Key, + ) + cl.Log("ticket added to cache for %s (EndTime: %v)", tgsRep.Ticket.SName.PrincipalNameString(), tgsRep.DecryptedEncPart.EndTime) + return tgsReq, tgsRep, err +} + +// GetServiceTicket makes a request to get a service ticket for the SPN specified +// SPN format: / Eg. 
HTTP/www.example.com +// The ticket will be added to the client's ticket cache +func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + var tkt messages.Ticket + var skey types.EncryptionKey + if tkt, skey, ok := cl.GetCachedTicket(spn); ok { + // Already a valid ticket in the cache + return tkt, skey, nil + } + princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) + realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1]) + + tgt, skey, err := cl.sessionTGT(realm) + if err != nil { + return tkt, skey, err + } + _, tgsRep, err := cl.TGSREQGenerateAndExchange(princ, realm, tgt, skey, false) + if err != nil { + return tkt, skey, err + } + return tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go b/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go new file mode 100644 index 00000000..552e73e4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go @@ -0,0 +1,134 @@ +package client + +import ( + "encoding/json" + "errors" + "sort" + "sync" + "time" + + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Cache for service tickets held by the client. +type Cache struct { + Entries map[string]CacheEntry + mux sync.RWMutex +} + +// CacheEntry holds details for a cache entry. +type CacheEntry struct { + SPN string + Ticket messages.Ticket `json:"-"` + AuthTime time.Time + StartTime time.Time + EndTime time.Time + RenewTill time.Time + SessionKey types.EncryptionKey `json:"-"` +} + +// NewCache creates a new client ticket cache instance. +func NewCache() *Cache { + return &Cache{ + Entries: map[string]CacheEntry{}, + } +} + +// getEntry returns a cache entry that matches the SPN. 
+func (c *Cache) getEntry(spn string) (CacheEntry, bool) { + c.mux.RLock() + defer c.mux.RUnlock() + e, ok := (*c).Entries[spn] + return e, ok +} + +// JSON returns information about the cached service tickets in a JSON format. +func (c *Cache) JSON() (string, error) { + c.mux.RLock() + defer c.mux.RUnlock() + var es []CacheEntry + keys := make([]string, 0, len(c.Entries)) + for k := range c.Entries { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + es = append(es, c.Entries[k]) + } + b, err := json.MarshalIndent(&es, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} + +// addEntry adds a ticket to the cache. +func (c *Cache) addEntry(tkt messages.Ticket, authTime, startTime, endTime, renewTill time.Time, sessionKey types.EncryptionKey) CacheEntry { + spn := tkt.SName.PrincipalNameString() + c.mux.Lock() + defer c.mux.Unlock() + (*c).Entries[spn] = CacheEntry{ + SPN: spn, + Ticket: tkt, + AuthTime: authTime, + StartTime: startTime, + EndTime: endTime, + RenewTill: renewTill, + SessionKey: sessionKey, + } + return c.Entries[spn] +} + +// clear deletes all the cache entries +func (c *Cache) clear() { + c.mux.Lock() + defer c.mux.Unlock() + for k := range c.Entries { + delete(c.Entries, k) + } +} + +// RemoveEntry removes the cache entry for the defined SPN. +func (c *Cache) RemoveEntry(spn string) { + c.mux.Lock() + defer c.mux.Unlock() + delete(c.Entries, spn) +} + +// GetCachedTicket returns a ticket from the cache for the SPN. +// Only a ticket that is currently valid will be returned. 
+func (cl *Client) GetCachedTicket(spn string) (messages.Ticket, types.EncryptionKey, bool) { + if e, ok := cl.cache.getEntry(spn); ok { + //If within time window of ticket return it + if time.Now().UTC().After(e.StartTime) && time.Now().UTC().Before(e.EndTime) { + cl.Log("ticket received from cache for %s", spn) + return e.Ticket, e.SessionKey, true + } else if time.Now().UTC().Before(e.RenewTill) { + e, err := cl.renewTicket(e) + if err != nil { + return e.Ticket, e.SessionKey, false + } + return e.Ticket, e.SessionKey, true + } + } + var tkt messages.Ticket + var key types.EncryptionKey + return tkt, key, false +} + +// renewTicket renews a cache entry ticket. +// To renew from outside the client package use GetCachedTicket +func (cl *Client) renewTicket(e CacheEntry) (CacheEntry, error) { + spn := e.Ticket.SName + _, _, err := cl.TGSREQGenerateAndExchange(spn, e.Ticket.Realm, e.Ticket, e.SessionKey, true) + if err != nil { + return e, err + } + e, ok := cl.cache.getEntry(e.Ticket.SName.PrincipalNameString()) + if !ok { + return e, errors.New("ticket was not added to cache") + } + cl.Log("ticket renewed for %s (EndTime: %v)", spn.PrincipalNameString(), e.EndTime) + return e, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/client.go b/vendor/github.com/jcmturner/gokrb5/v8/client/client.go new file mode 100644 index 00000000..074e3f12 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/client.go @@ -0,0 +1,329 @@ +// Package client provides a client library and methods for Kerberos 5 authentication. 
+package client + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Client side configuration and state. +type Client struct { + Credentials *credentials.Credentials + Config *config.Config + settings *Settings + sessions *sessions + cache *Cache +} + +// NewWithPassword creates a new client from a password credential. +// Set the realm to empty string to use the default realm from config. +func NewWithPassword(username, realm, password string, krb5conf *config.Config, settings ...func(*Settings)) *Client { + creds := credentials.New(username, realm) + return &Client{ + Credentials: creds.WithPassword(password), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } +} + +// NewWithKeytab creates a new client from a keytab credential. +func NewWithKeytab(username, realm string, kt *keytab.Keytab, krb5conf *config.Config, settings ...func(*Settings)) *Client { + creds := credentials.New(username, realm) + return &Client{ + Credentials: creds.WithKeytab(kt), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } +} + +// NewFromCCache create a client from a populated client cache. +// +// WARNING: A client created from CCache does not automatically renew TGTs and a failure will occur after the TGT expires. 
+func NewFromCCache(c *credentials.CCache, krb5conf *config.Config, settings ...func(*Settings)) (*Client, error) { + cl := &Client{ + Credentials: c.GetClientCredentials(), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", c.DefaultPrincipal.Realm}, + } + cred, ok := c.GetEntry(spn) + if !ok { + return cl, errors.New("TGT not found in CCache") + } + var tgt messages.Ticket + err := tgt.Unmarshal(cred.Ticket) + if err != nil { + return cl, fmt.Errorf("TGT bytes in cache are not valid: %v", err) + } + cl.sessions.Entries[c.DefaultPrincipal.Realm] = &session{ + realm: c.DefaultPrincipal.Realm, + authTime: cred.AuthTime, + endTime: cred.EndTime, + renewTill: cred.RenewTill, + tgt: tgt, + sessionKey: cred.Key, + } + for _, cred := range c.GetEntries() { + var tkt messages.Ticket + err = tkt.Unmarshal(cred.Ticket) + if err != nil { + return cl, fmt.Errorf("cache entry ticket bytes are not valid: %v", err) + } + cl.cache.addEntry( + tkt, + cred.AuthTime, + cred.StartTime, + cred.EndTime, + cred.RenewTill, + cred.Key, + ) + } + return cl, nil +} + +// Key returns the client's encryption key for the specified encryption type and its kvno (kvno of zero will find latest). +// The key can be retrieved either from the keytab or generated from the client's password. +// If the client has both a keytab and a password defined the keytab is favoured as the source for the key +// A KRBError can be passed in the event the KDC returns one of type KDC_ERR_PREAUTH_REQUIRED and is required to derive +// the key for pre-authentication from the client's password. If a KRBError is not available, pass nil to this argument. 
+func (cl *Client) Key(etype etype.EType, kvno int, krberr *messages.KRBError) (types.EncryptionKey, int, error) { + if cl.Credentials.HasKeytab() && etype != nil { + return cl.Credentials.Keytab().GetEncryptionKey(cl.Credentials.CName(), cl.Credentials.Domain(), kvno, etype.GetETypeID()) + } else if cl.Credentials.HasPassword() { + if krberr != nil && krberr.ErrorCode == errorcode.KDC_ERR_PREAUTH_REQUIRED { + var pas types.PADataSequence + err := pas.Unmarshal(krberr.EData) + if err != nil { + return types.EncryptionKey{}, 0, fmt.Errorf("could not get PAData from KRBError to generate key from password: %v", err) + } + key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), krberr.CName, krberr.CRealm, etype.GetETypeID(), pas) + return key, 0, err + } + key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), cl.Credentials.CName(), cl.Credentials.Domain(), etype.GetETypeID(), types.PADataSequence{}) + return key, 0, err + } + return types.EncryptionKey{}, 0, errors.New("credential has neither keytab or password to generate key") +} + +// IsConfigured indicates if the client has the values required set. 
+func (cl *Client) IsConfigured() (bool, error) { + if cl.Credentials.UserName() == "" { + return false, errors.New("client does not have a username") + } + if cl.Credentials.Domain() == "" { + return false, errors.New("client does not have a define realm") + } + // Client needs to have either a password, keytab or a session already (later when loading from CCache) + if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { + authTime, _, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || authTime.IsZero() { + return false, errors.New("client has neither a keytab nor a password set and no session") + } + } + if !cl.Config.LibDefaults.DNSLookupKDC { + for _, r := range cl.Config.Realms { + if r.Realm == cl.Credentials.Domain() { + if len(r.KDC) > 0 { + return true, nil + } + return false, errors.New("client krb5 config does not have any defined KDCs for the default realm") + } + } + } + return true, nil +} + +// Login the client with the KDC via an AS exchange. 
+func (cl *Client) Login() error { + if ok, err := cl.IsConfigured(); !ok { + return err + } + if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "no user credentials available and error getting any existing session") + } + if time.Now().UTC().After(endTime) { + return krberror.New(krberror.KRBMsgError, "cannot login, no user credentials available and no valid existing session") + } + // no credentials but there is a session with tgt already + return nil + } + ASReq, err := messages.NewASReqForTGT(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName()) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AS_REQ") + } + ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0) + if err != nil { + return err + } + cl.addSession(ASRep.Ticket, ASRep.DecryptedEncPart) + return nil +} + +// AffirmLogin will only perform an AS exchange with the KDC if the client does not already have a TGT. +func (cl *Client) AffirmLogin() error { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + return nil +} + +// realmLogin obtains or renews a TGT and establishes a session for the realm specified. 
+func (cl *Client) realmLogin(realm string) error { + if realm == cl.Credentials.Domain() { + return cl.Login() + } + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + tgt, skey, err := cl.sessionTGT(cl.Credentials.Domain()) + if err != nil { + return err + } + + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + + _, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, false) + if err != nil { + return err + } + cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) + + return nil +} + +// Destroy stops the auto-renewal of all sessions and removes the sessions and cache entries from the client. +func (cl *Client) Destroy() { + creds := credentials.New("", "") + cl.sessions.destroy() + cl.cache.clear() + cl.Credentials = creds + cl.Log("client destroyed") +} + +// Diagnostics runs a set of checks that the client is properly configured and writes details to the io.Writer provided. 
+func (cl *Client) Diagnostics(w io.Writer) error { + cl.Print(w) + var errs []string + if cl.Credentials.HasKeytab() { + var loginRealmEncTypes []int32 + for _, e := range cl.Credentials.Keytab().Entries { + if e.Principal.Realm == cl.Credentials.Realm() { + loginRealmEncTypes = append(loginRealmEncTypes, e.Key.KeyType) + } + } + for _, et := range cl.Config.LibDefaults.DefaultTktEnctypeIDs { + var etInKt bool + for _, val := range loginRealmEncTypes { + if val == et { + etInKt = true + break + } + } + if !etInKt { + errs = append(errs, fmt.Sprintf("default_tkt_enctypes specifies %d but this enctype is not available in the client's keytab", et)) + } + } + for _, et := range cl.Config.LibDefaults.PreferredPreauthTypes { + var etInKt bool + for _, val := range loginRealmEncTypes { + if int(val) == et { + etInKt = true + break + } + } + if !etInKt { + errs = append(errs, fmt.Sprintf("preferred_preauth_types specifies %d but this enctype is not available in the client's keytab", et)) + } + } + } + udpCnt, udpKDC, err := cl.Config.GetKDCs(cl.Credentials.Realm(), false) + if err != nil { + errs = append(errs, fmt.Sprintf("error when resolving KDCs for UDP communication: %v", err)) + } + if udpCnt < 1 { + errs = append(errs, "no KDCs resolved for communication via UDP.") + } else { + b, _ := json.MarshalIndent(&udpKDC, "", " ") + fmt.Fprintf(w, "UDP KDCs: %s\n", string(b)) + } + tcpCnt, tcpKDC, err := cl.Config.GetKDCs(cl.Credentials.Realm(), false) + if err != nil { + errs = append(errs, fmt.Sprintf("error when resolving KDCs for TCP communication: %v", err)) + } + if tcpCnt < 1 { + errs = append(errs, "no KDCs resolved for communication via TCP.") + } else { + b, _ := json.MarshalIndent(&tcpKDC, "", " ") + fmt.Fprintf(w, "TCP KDCs: %s\n", string(b)) + } + + if errs == nil || len(errs) < 1 { + return nil + } + err = fmt.Errorf(strings.Join(errs, "\n")) + return err +} + +// Print writes the details of the client to the io.Writer provided. 
+func (cl *Client) Print(w io.Writer) { + c, _ := cl.Credentials.JSON() + fmt.Fprintf(w, "Credentials:\n%s\n", c) + + s, _ := cl.sessions.JSON() + fmt.Fprintf(w, "TGT Sessions:\n%s\n", s) + + c, _ = cl.cache.JSON() + fmt.Fprintf(w, "Service ticket cache:\n%s\n", c) + + s, _ = cl.settings.JSON() + fmt.Fprintf(w, "Settings:\n%s\n", s) + + j, _ := cl.Config.JSON() + fmt.Fprintf(w, "Krb5 config:\n%s\n", j) + + k, _ := cl.Credentials.Keytab().JSON() + fmt.Fprintf(w, "Keytab:\n%s\n", k) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go new file mode 100644 index 00000000..634f015c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go @@ -0,0 +1,218 @@ +package client + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strings" + "time" + + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/messages" +) + +// SendToKDC performs network actions to send data to the KDC. +func (cl *Client) sendToKDC(b []byte, realm string) ([]byte, error) { + var rb []byte + if cl.Config.LibDefaults.UDPPreferenceLimit == 1 { + //1 means we should always use TCP + rb, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + return rb, e + } + return rb, fmt.Errorf("communication error with KDC via TCP: %v", errtcp) + } + return rb, nil + } + if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { + //Try UDP first, TCP second + rb, errudp := cl.sendKDCUDP(realm, b) + if errudp != nil { + if e, ok := errudp.(messages.KRBError); ok && e.ErrorCode != errorcode.KRB_ERR_RESPONSE_TOO_BIG { + // Got a KRBError from KDC + // If this is not a KRB_ERR_RESPONSE_TOO_BIG we will return immediately otherwise will try TCP. 
+ return rb, e + } + // Try TCP + r, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + // Got a KRBError + return r, e + } + return r, fmt.Errorf("failed to communicate with KDC. Attempts made with UDP (%v) and then TCP (%v)", errudp, errtcp) + } + rb = r + } + return rb, nil + } + //Try TCP first, UDP second + rb, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + // Got a KRBError from KDC so returning and not trying UDP. + return rb, e + } + rb, errudp := cl.sendKDCUDP(realm, b) + if errudp != nil { + if e, ok := errudp.(messages.KRBError); ok { + // Got a KRBError + return rb, e + } + return rb, fmt.Errorf("failed to communicate with KDC. Attempts made with TCP (%v) and then UDP (%v)", errtcp, errudp) + } + } + return rb, nil +} + +// sendKDCUDP sends bytes to the KDC via UDP. +func (cl *Client) sendKDCUDP(realm string, b []byte) ([]byte, error) { + var r []byte + _, kdcs, err := cl.Config.GetKDCs(realm, false) + if err != nil { + return r, err + } + r, err = dialSendUDP(kdcs, b) + if err != nil { + return r, err + } + return checkForKRBError(r) +} + +// dialSendUDP establishes a UDP connection to a KDC. 
+func dialSendUDP(kdcs map[int]string, b []byte) ([]byte, error) { + var errs []string + for i := 1; i <= len(kdcs); i++ { + udpAddr, err := net.ResolveUDPAddr("udp", kdcs[i]) + if err != nil { + errs = append(errs, fmt.Sprintf("error resolving KDC address: %v", err)) + continue + } + + conn, err := net.DialTimeout("udp", udpAddr.String(), 5*time.Second) + if err != nil { + errs = append(errs, fmt.Sprintf("error setting dial timeout on connection to %s: %v", kdcs[i], err)) + continue + } + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + errs = append(errs, fmt.Sprintf("error setting deadline on connection to %s: %v", kdcs[i], err)) + continue + } + // conn is guaranteed to be a UDPConn + rb, err := sendUDP(conn.(*net.UDPConn), b) + if err != nil { + errs = append(errs, fmt.Sprintf("error sneding to %s: %v", kdcs[i], err)) + continue + } + return rb, nil + } + return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; ")) +} + +// sendUDP sends bytes to connection over UDP. +func sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) { + var r []byte + defer conn.Close() + _, err := conn.Write(b) + if err != nil { + return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err) + } + udpbuf := make([]byte, 4096) + n, _, err := conn.ReadFrom(udpbuf) + r = udpbuf[:n] + if err != nil { + return r, fmt.Errorf("sending over UDP failed to %s: %v", conn.RemoteAddr().String(), err) + } + if len(r) < 1 { + return r, fmt.Errorf("no response data from %s", conn.RemoteAddr().String()) + } + return r, nil +} + +// sendKDCTCP sends bytes to the KDC via TCP. +func (cl *Client) sendKDCTCP(realm string, b []byte) ([]byte, error) { + var r []byte + _, kdcs, err := cl.Config.GetKDCs(realm, true) + if err != nil { + return r, err + } + r, err = dialSendTCP(kdcs, b) + if err != nil { + return r, err + } + return checkForKRBError(r) +} + +// dialKDCTCP establishes a TCP connection to a KDC. 
+func dialSendTCP(kdcs map[int]string, b []byte) ([]byte, error) { + var errs []string + for i := 1; i <= len(kdcs); i++ { + tcpAddr, err := net.ResolveTCPAddr("tcp", kdcs[i]) + if err != nil { + errs = append(errs, fmt.Sprintf("error resolving KDC address: %v", err)) + continue + } + + conn, err := net.DialTimeout("tcp", tcpAddr.String(), 5*time.Second) + if err != nil { + errs = append(errs, fmt.Sprintf("error setting dial timeout on connection to %s: %v", kdcs[i], err)) + continue + } + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + errs = append(errs, fmt.Sprintf("error setting deadline on connection to %s: %v", kdcs[i], err)) + continue + } + // conn is guaranteed to be a TCPConn + rb, err := sendTCP(conn.(*net.TCPConn), b) + if err != nil { + errs = append(errs, fmt.Sprintf("error sneding to %s: %v", kdcs[i], err)) + continue + } + return rb, nil + } + return nil, errors.New("error in getting a TCP connection to any of the KDCs") +} + +// sendTCP sends bytes to connection over TCP. +func sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) { + defer conn.Close() + var r []byte + // RFC 4120 7.2.2 specifies the first 4 bytes indicate the length of the message in big endian order. + hb := make([]byte, 4, 4) + binary.BigEndian.PutUint32(hb, uint32(len(b))) + b = append(hb, b...) 
+ + _, err := conn.Write(b) + if err != nil { + return r, fmt.Errorf("error sending to KDC (%s): %v", conn.RemoteAddr().String(), err) + } + + sh := make([]byte, 4, 4) + _, err = conn.Read(sh) + if err != nil { + return r, fmt.Errorf("error reading response size header: %v", err) + } + s := binary.BigEndian.Uint32(sh) + + rb := make([]byte, s, s) + _, err = io.ReadFull(conn, rb) + if err != nil { + return r, fmt.Errorf("error reading response: %v", err) + } + if len(rb) < 1 { + return r, fmt.Errorf("no response data from KDC %s", conn.RemoteAddr().String()) + } + return rb, nil +} + +// checkForKRBError checks if the response bytes from the KDC are a KRBError. +func checkForKRBError(b []byte) ([]byte, error) { + var KRBErr messages.KRBError + if err := KRBErr.Unmarshal(b); err == nil { + return b, KRBErr + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go b/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go new file mode 100644 index 00000000..fe20559c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go @@ -0,0 +1,75 @@ +package client + +import ( + "fmt" + + "github.com/jcmturner/gokrb5/v8/kadmin" + "github.com/jcmturner/gokrb5/v8/messages" +) + +// Kpasswd server response codes. +const ( + KRB5_KPASSWD_SUCCESS = 0 + KRB5_KPASSWD_MALFORMED = 1 + KRB5_KPASSWD_HARDERROR = 2 + KRB5_KPASSWD_AUTHERROR = 3 + KRB5_KPASSWD_SOFTERROR = 4 + KRB5_KPASSWD_ACCESSDENIED = 5 + KRB5_KPASSWD_BAD_VERSION = 6 + KRB5_KPASSWD_INITIAL_FLAG_NEEDED = 7 +) + +// ChangePasswd changes the password of the client to the value provided. 
+func (cl *Client) ChangePasswd(newPasswd string) (bool, error) { + ASReq, err := messages.NewASReqForChgPasswd(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName()) + if err != nil { + return false, err + } + ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0) + if err != nil { + return false, err + } + + msg, key, err := kadmin.ChangePasswdMsg(cl.Credentials.CName(), cl.Credentials.Domain(), newPasswd, ASRep.Ticket, ASRep.DecryptedEncPart.Key) + if err != nil { + return false, err + } + r, err := cl.sendToKPasswd(msg) + if err != nil { + return false, err + } + err = r.Decrypt(key) + if err != nil { + return false, err + } + if r.ResultCode != KRB5_KPASSWD_SUCCESS { + return false, fmt.Errorf("error response from kadmin: code: %d; result: %s; krberror: %v", r.ResultCode, r.Result, r.KRBError) + } + cl.Credentials.WithPassword(newPasswd) + return true, nil +} + +func (cl *Client) sendToKPasswd(msg kadmin.Request) (r kadmin.Reply, err error) { + _, kps, err := cl.Config.GetKpasswdServers(cl.Credentials.Domain(), true) + if err != nil { + return + } + b, err := msg.Marshal() + if err != nil { + return + } + var rb []byte + if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { + rb, err = dialSendUDP(kps, b) + if err != nil { + return + } + } else { + rb, err = dialSendTCP(kps, b) + if err != nil { + return + } + } + err = r.Unmarshal(rb) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go new file mode 100644 index 00000000..f7654d0d --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go @@ -0,0 +1,295 @@ +package client + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// sessions hold TGTs and are keyed on the realm name +type 
sessions struct { + Entries map[string]*session + mux sync.RWMutex +} + +// destroy erases all sessions +func (s *sessions) destroy() { + s.mux.Lock() + defer s.mux.Unlock() + for k, e := range s.Entries { + e.destroy() + delete(s.Entries, k) + } +} + +// update replaces a session with the one provided or adds it as a new one +func (s *sessions) update(sess *session) { + s.mux.Lock() + defer s.mux.Unlock() + // if a session already exists for this, cancel its auto renew. + if i, ok := s.Entries[sess.realm]; ok { + if i != sess { + // Session in the sessions cache is not the same as one provided. + // Cancel the one in the cache and add this one. + i.mux.Lock() + defer i.mux.Unlock() + i.cancel <- true + s.Entries[sess.realm] = sess + return + } + } + // No session for this realm was found so just add it + s.Entries[sess.realm] = sess +} + +// get returns the session for the realm specified +func (s *sessions) get(realm string) (*session, bool) { + s.mux.RLock() + defer s.mux.RUnlock() + sess, ok := s.Entries[realm] + return sess, ok +} + +// session holds the TGT details for a realm +type session struct { + realm string + authTime time.Time + endTime time.Time + renewTill time.Time + tgt messages.Ticket + sessionKey types.EncryptionKey + sessionKeyExpiration time.Time + cancel chan bool + mux sync.RWMutex +} + +// jsonSession is used to enable marshaling some information of a session in a JSON format +type jsonSession struct { + Realm string + AuthTime time.Time + EndTime time.Time + RenewTill time.Time + SessionKeyExpiration time.Time +} + +// AddSession adds a session for a realm with a TGT to the client's session cache. +// A goroutine is started to automatically renew the TGT before expiry. 
+func (cl *Client) addSession(tgt messages.Ticket, dep messages.EncKDCRepPart) { + if strings.ToLower(tgt.SName.NameString[0]) != "krbtgt" { + // Not a TGT + return + } + realm := tgt.SName.NameString[len(tgt.SName.NameString)-1] + s := &session{ + realm: realm, + authTime: dep.AuthTime, + endTime: dep.EndTime, + renewTill: dep.RenewTill, + tgt: tgt, + sessionKey: dep.Key, + sessionKeyExpiration: dep.KeyExpiration, + } + cl.sessions.update(s) + cl.enableAutoSessionRenewal(s) + cl.Log("TGT session added for %s (EndTime: %v)", realm, dep.EndTime) +} + +// update overwrites the session details with those from the TGT and decrypted encPart +func (s *session) update(tgt messages.Ticket, dep messages.EncKDCRepPart) { + s.mux.Lock() + defer s.mux.Unlock() + s.authTime = dep.AuthTime + s.endTime = dep.EndTime + s.renewTill = dep.RenewTill + s.tgt = tgt + s.sessionKey = dep.Key + s.sessionKeyExpiration = dep.KeyExpiration +} + +// destroy will cancel any auto renewal of the session and set the expiration times to the current time +func (s *session) destroy() { + s.mux.Lock() + defer s.mux.Unlock() + if s.cancel != nil { + s.cancel <- true + } + s.endTime = time.Now().UTC() + s.renewTill = s.endTime + s.sessionKeyExpiration = s.endTime +} + +// valid informs if the TGT is still within the valid time window +func (s *session) valid() bool { + s.mux.RLock() + defer s.mux.RUnlock() + t := time.Now().UTC() + if t.Before(s.endTime) && s.authTime.Before(t) { + return true + } + return false +} + +// tgtDetails is a thread safe way to get the session's realm, TGT and session key values +func (s *session) tgtDetails() (string, messages.Ticket, types.EncryptionKey) { + s.mux.RLock() + defer s.mux.RUnlock() + return s.realm, s.tgt, s.sessionKey +} + +// timeDetails is a thread safe way to get the session's validity time values +func (s *session) timeDetails() (string, time.Time, time.Time, time.Time, time.Time) { + s.mux.RLock() + defer s.mux.RUnlock() + return s.realm, s.authTime, 
s.endTime, s.renewTill, s.sessionKeyExpiration +} + +// JSON return information about the held sessions in a JSON format. +func (s *sessions) JSON() (string, error) { + s.mux.RLock() + defer s.mux.RUnlock() + var js []jsonSession + keys := make([]string, 0, len(s.Entries)) + for k := range s.Entries { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + r, at, et, rt, kt := s.Entries[k].timeDetails() + j := jsonSession{ + Realm: r, + AuthTime: at, + EndTime: et, + RenewTill: rt, + SessionKeyExpiration: kt, + } + js = append(js, j) + } + b, err := json.MarshalIndent(js, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} + +// enableAutoSessionRenewal turns on the automatic renewal for the client's TGT session. +func (cl *Client) enableAutoSessionRenewal(s *session) { + var timer *time.Timer + s.mux.Lock() + s.cancel = make(chan bool, 1) + s.mux.Unlock() + go func(s *session) { + for { + s.mux.RLock() + w := (s.endTime.Sub(time.Now().UTC()) * 5) / 6 + s.mux.RUnlock() + if w < 0 { + return + } + timer = time.NewTimer(w) + select { + case <-timer.C: + renewal, err := cl.refreshSession(s) + if err != nil { + cl.Log("error refreshing session: %v", err) + } + if !renewal && err == nil { + // end this goroutine as there will have been a new login and new auto renewal goroutine created. + return + } + case <-s.cancel: + // cancel has been called. Stop the timer and exit. + timer.Stop() + return + } + } + }(s) +} + +// renewTGT renews the client's TGT session. 
+func (cl *Client) renewTGT(s *session) error { + realm, tgt, skey := s.tgtDetails() + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + _, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, true) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error renewing TGT for %s", realm) + } + s.update(tgsRep.Ticket, tgsRep.DecryptedEncPart) + cl.sessions.update(s) + cl.Log("TGT session renewed for %s (EndTime: %v)", realm, tgsRep.DecryptedEncPart.EndTime) + return nil +} + +// refreshSession updates either through renewal or creating a new login. +// The boolean indicates if the update was a renewal. +func (cl *Client) refreshSession(s *session) (bool, error) { + s.mux.RLock() + realm := s.realm + renewTill := s.renewTill + s.mux.RUnlock() + cl.Log("refreshing TGT session for %s", realm) + if time.Now().UTC().Before(renewTill) { + err := cl.renewTGT(s) + return true, err + } + err := cl.realmLogin(realm) + return false, err +} + +// ensureValidSession makes sure there is a valid session for the realm +func (cl *Client) ensureValidSession(realm string) error { + s, ok := cl.sessions.get(realm) + if ok { + s.mux.RLock() + d := s.endTime.Sub(s.authTime) / 6 + if s.endTime.Sub(time.Now().UTC()) > d { + s.mux.RUnlock() + return nil + } + s.mux.RUnlock() + _, err := cl.refreshSession(s) + return err + } + return cl.realmLogin(realm) +} + +// sessionTGTDetails is a thread safe way to get the TGT and session key values for a realm +func (cl *Client) sessionTGT(realm string) (tgt messages.Ticket, sessionKey types.EncryptionKey, err error) { + err = cl.ensureValidSession(realm) + if err != nil { + return + } + s, ok := cl.sessions.get(realm) + if !ok { + err = fmt.Errorf("could not find TGT session for %s", realm) + return + } + _, tgt, sessionKey = s.tgtDetails() + return +} + +// sessionTimes provides the timing information with regards to a session for the realm 
specified. +func (cl *Client) sessionTimes(realm string) (authTime, endTime, renewTime, sessionExp time.Time, err error) { + s, ok := cl.sessions.get(realm) + if !ok { + err = fmt.Errorf("could not find TGT session for %s", realm) + return + } + _, authTime, endTime, renewTime, sessionExp = s.timeDetails() + return +} + +// spnRealm resolves the realm name of a service principal name +func (cl *Client) spnRealm(spn types.PrincipalName) string { + return cl.Config.ResolveRealm(spn.NameString[len(spn.NameString)-1]) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go b/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go new file mode 100644 index 00000000..bcd39454 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go @@ -0,0 +1,93 @@ +package client + +import ( + "encoding/json" + "fmt" + "log" +) + +// Settings holds optional client settings. +type Settings struct { + disablePAFXFast bool + assumePreAuthentication bool + preAuthEType int32 + logger *log.Logger +} + +// jsonSettings is used when marshaling the Settings details to JSON format. +type jsonSettings struct { + DisablePAFXFast bool + AssumePreAuthentication bool +} + +// NewSettings creates a new client settings struct. +func NewSettings(settings ...func(*Settings)) *Settings { + s := new(Settings) + for _, set := range settings { + set(s) + } + return s +} + +// DisablePAFXFAST used to configure the client to not use PA_FX_FAST. +// +// s := NewSettings(DisablePAFXFAST(true)) +func DisablePAFXFAST(b bool) func(*Settings) { + return func(s *Settings) { + s.disablePAFXFast = b + } +} + +// DisablePAFXFAST indicates is the client should disable the use of PA_FX_FAST. +func (s *Settings) DisablePAFXFAST() bool { + return s.disablePAFXFast +} + +// AssumePreAuthentication used to configure the client to assume pre-authentication is required. 
+// +// s := NewSettings(AssumePreAuthentication(true)) +func AssumePreAuthentication(b bool) func(*Settings) { + return func(s *Settings) { + s.assumePreAuthentication = b + } +} + +// AssumePreAuthentication indicates if the client should proactively assume using pre-authentication. +func (s *Settings) AssumePreAuthentication() bool { + return s.assumePreAuthentication +} + +// Logger used to configure client with a logger. +// +// s := NewSettings(kt, Logger(l)) +func Logger(l *log.Logger) func(*Settings) { + return func(s *Settings) { + s.logger = l + } +} + +// Logger returns the client logger instance. +func (s *Settings) Logger() *log.Logger { + return s.logger +} + +// Log will write to the service's logger if it is configured. +func (cl *Client) Log(format string, v ...interface{}) { + if cl.settings.Logger() != nil { + cl.settings.Logger().Output(2, fmt.Sprintf(format, v...)) + } +} + +// JSON returns a JSON representation of the settings. +func (s *Settings) JSON() (string, error) { + js := jsonSettings{ + DisablePAFXFast: s.disablePAFXFast, + AssumePreAuthentication: s.assumePreAuthentication, + } + b, err := json.MarshalIndent(js, "", " ") + if err != nil { + return "", err + } + return string(b), nil + +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/error.go b/vendor/github.com/jcmturner/gokrb5/v8/config/error.go new file mode 100644 index 00000000..1fbda51f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/error.go @@ -0,0 +1,30 @@ +package config + +import "fmt" + +// UnsupportedDirective error. +type UnsupportedDirective struct { + text string +} + +// Error implements the error interface for unsupported directives. +func (e UnsupportedDirective) Error() string { + return e.text +} + +// Invalid config error. +type Invalid struct { + text string +} + +// Error implements the error interface for invalid config error. +func (e Invalid) Error() string { + return e.text +} + +// InvalidErrorf creates a new Invalid error. 
+func InvalidErrorf(format string, a ...interface{}) Invalid { + return Invalid{ + text: fmt.Sprintf("invalid krb5 config "+format, a...), + } +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go b/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go new file mode 100644 index 00000000..3f22c70c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go @@ -0,0 +1,141 @@ +package config + +import ( + "fmt" + "math/rand" + "net" + "strconv" + "strings" + + "github.com/jcmturner/dnsutils/v2" +) + +// GetKDCs returns the count of KDCs available and a map of KDC host names keyed on preference order. +func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) { + if realm == "" { + realm = c.LibDefaults.DefaultRealm + } + kdcs := make(map[int]string) + var count int + + // Get the KDCs from the krb5.conf. + var ks []string + for _, r := range c.Realms { + if r.Realm != realm { + continue + } + ks = r.KDC + } + count = len(ks) + + if count > 0 { + // Order the kdcs randomly for preference. + kdcs = randServOrder(ks) + return count, kdcs, nil + } + + if !c.LibDefaults.DNSLookupKDC { + return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm) + } + + // Use DNS to resolve kerberos SRV records. + proto := "udp" + if tcp { + proto = "tcp" + } + index, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm) + if err != nil { + return count, kdcs, err + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm) + } + count = index + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) + } + return count, kdcs, nil +} + +// GetKpasswdServers returns the count of kpasswd servers available and a map of kpasswd host names keyed on preference order. 
+// https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html#realms - see kpasswd_server section +func (c *Config) GetKpasswdServers(realm string, tcp bool) (int, map[int]string, error) { + kdcs := make(map[int]string) + var count int + + // Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf. + if c.LibDefaults.DNSLookupKDC { + proto := "udp" + if tcp { + proto = "tcp" + } + c, addrs, err := dnsutils.OrderedSRV("kpasswd", proto, realm) + if err != nil { + return count, kdcs, err + } + if c < 1 { + c, addrs, err = dnsutils.OrderedSRV("kerberos-adm", proto, realm) + if err != nil { + return count, kdcs, err + } + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no kpasswd or kadmin SRV records found for realm %s", realm) + } + count = c + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) + } + } else { + // Get the KDCs from the krb5.conf an order them randomly for preference. + var ks []string + var ka []string + for _, r := range c.Realms { + if r.Realm == realm { + ks = r.KPasswdServer + ka = r.AdminServer + break + } + } + if len(ks) < 1 { + for _, k := range ka { + h, _, err := net.SplitHostPort(k) + if err != nil { + continue + } + ks = append(ks, h+":464") + } + } + count = len(ks) + if count < 1 { + return count, kdcs, fmt.Errorf("no kpasswd or kadmin defined in configuration for realm %s", realm) + } + kdcs = randServOrder(ks) + } + return count, kdcs, nil +} + +func randServOrder(ks []string) map[int]string { + kdcs := make(map[int]string) + count := len(ks) + i := 1 + if count > 1 { + l := len(ks) + for l > 0 { + ri := rand.Intn(l) + kdcs[i] = ks[ri] + if l > 1 { + // Remove the entry from the source slice by swapping with the last entry and truncating + ks[len(ks)-1], ks[ri] = ks[ri], ks[len(ks)-1] + ks = ks[:len(ks)-1] + l = len(ks) + } else { + l = 0 + } + i++ + } + } else { + kdcs[i] = ks[0] + } + return kdcs +} diff --git 
a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go new file mode 100644 index 00000000..a7638433 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go @@ -0,0 +1,728 @@ +// Package config implements KRB5 client and service configuration as described at https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html +package config + +import ( + "bufio" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "regexp" + "strconv" + "strings" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// Config represents the KRB5 configuration. +type Config struct { + LibDefaults LibDefaults + Realms []Realm + DomainRealm DomainRealm + //CaPaths + //AppDefaults + //Plugins +} + +// WeakETypeList is a list of encryption types that have been deemed weak. +const WeakETypeList = "des-cbc-crc des-cbc-md4 des-cbc-md5 des-cbc-raw des3-cbc-raw des-hmac-sha1 arcfour-hmac-exp rc4-hmac-exp arcfour-hmac-md5-exp des" + +// New creates a new config struct instance. +func New() *Config { + d := make(DomainRealm) + return &Config{ + LibDefaults: newLibDefaults(), + DomainRealm: d, + } +} + +// LibDefaults represents the [libdefaults] section of the configuration. +type LibDefaults struct { + AllowWeakCrypto bool //default false + // ap_req_checksum_type int //unlikely to support this + Canonicalize bool //default false + CCacheType int //default is 4. 
unlikely to implement older + Clockskew time.Duration //max allowed skew in seconds, default 300 + //Default_ccache_name string // default /tmp/krb5cc_%{uid} //Not implementing as will hold in memory + DefaultClientKeytabName string //default /usr/local/var/krb5/user/%{euid}/client.keytab + DefaultKeytabName string //default /etc/krb5.keytab + DefaultRealm string + DefaultTGSEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTktEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTGSEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTktEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DNSCanonicalizeHostname bool //default true + DNSLookupKDC bool //default false + DNSLookupRealm bool + ExtraAddresses []net.IP //Not implementing yet + Forwardable bool //default false + IgnoreAcceptorHostname bool //default false + K5LoginAuthoritative bool //default false + K5LoginDirectory string //default user's home directory. 
Must be owned by the user or root + KDCDefaultOptions asn1.BitString //default 0x00000010 (KDC_OPT_RENEWABLE_OK) + KDCTimeSync int //default 1 + //kdc_req_checksum_type int //unlikely to implement as for very old KDCs + NoAddresses bool //default true + PermittedEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + PermittedEnctypeIDs []int32 + //plugin_base_dir string //not supporting plugins + PreferredPreauthTypes []int //default “17, 16, 15, 14â€, which forces libkrb5 to attempt to use PKINIT if it is supported + Proxiable bool //default false + RDNS bool //default true + RealmTryDomains int //default -1 + RenewLifetime time.Duration //default 0 + SafeChecksumType int //default 8 + TicketLifetime time.Duration //default 1 day + UDPPreferenceLimit int // 1 means to always use tcp. MIT krb5 has a default value of 1465, and it prevents user setting more than 32700. + VerifyAPReqNofail bool //default false +} + +// Create a new LibDefaults struct. 
+func newLibDefaults() LibDefaults { + uid := "0" + var hdir string + usr, _ := user.Current() + if usr != nil { + uid = usr.Uid + hdir = usr.HomeDir + } + opts := asn1.BitString{} + opts.Bytes, _ = hex.DecodeString("00000010") + opts.BitLength = len(opts.Bytes) * 8 + return LibDefaults{ + CCacheType: 4, + Clockskew: time.Duration(300) * time.Second, + DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid), + DefaultKeytabName: "/etc/krb5.keytab", + DefaultTGSEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + DefaultTktEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + DNSCanonicalizeHostname: true, + K5LoginDirectory: hdir, + KDCDefaultOptions: opts, + KDCTimeSync: 1, + NoAddresses: true, + PermittedEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + RDNS: true, + RealmTryDomains: -1, + SafeChecksumType: 8, + TicketLifetime: time.Duration(24) * time.Hour, + UDPPreferenceLimit: 1465, + PreferredPreauthTypes: []int{17, 16, 15, 14}, + } +} + +// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct. 
+func (l *LibDefaults) parseLines(lines []string) error { + for _, line := range lines { + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.Contains(line, "=") { + return InvalidErrorf("libdefaults section line (%s)", line) + } + + p := strings.Split(line, "=") + key := strings.TrimSpace(strings.ToLower(p[0])) + switch key { + case "allow_weak_crypto": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.AllowWeakCrypto = v + case "canonicalize": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Canonicalize = v + case "ccache_type": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseUint(p[1], 10, 32) + if err != nil || v < 0 || v > 4 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.CCacheType = int(v) + case "clockskew": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Clockskew = d + case "default_client_keytab_name": + l.DefaultClientKeytabName = strings.TrimSpace(p[1]) + case "default_keytab_name": + l.DefaultKeytabName = strings.TrimSpace(p[1]) + case "default_realm": + l.DefaultRealm = strings.TrimSpace(p[1]) + case "default_tgs_enctypes": + l.DefaultTGSEnctypes = strings.Fields(p[1]) + case "default_tkt_enctypes": + l.DefaultTktEnctypes = strings.Fields(p[1]) + case "dns_canonicalize_hostname": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSCanonicalizeHostname = v + case "dns_lookup_kdc": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSLookupKDC = v + case "dns_lookup_realm": + v, err := 
parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSLookupRealm = v + case "extra_addresses": + ipStr := strings.TrimSpace(p[1]) + for _, ip := range strings.Split(ipStr, ",") { + if eip := net.ParseIP(ip); eip != nil { + l.ExtraAddresses = append(l.ExtraAddresses, eip) + } + } + case "forwardable": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Forwardable = v + case "ignore_acceptor_hostname": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.IgnoreAcceptorHostname = v + case "k5login_authoritative": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.K5LoginAuthoritative = v + case "k5login_directory": + l.K5LoginDirectory = strings.TrimSpace(p[1]) + case "kdc_default_options": + v := strings.TrimSpace(p[1]) + v = strings.Replace(v, "0x", "", -1) + b, err := hex.DecodeString(v) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.KDCDefaultOptions.Bytes = b + l.KDCDefaultOptions.BitLength = len(b) * 8 + case "kdc_timesync": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < 0 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.KDCTimeSync = int(v) + case "noaddresses": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.NoAddresses = v + case "permitted_enctypes": + l.PermittedEnctypes = strings.Fields(p[1]) + case "preferred_preauth_types": + p[1] = strings.TrimSpace(p[1]) + t := strings.Split(p[1], ",") + var v []int + for _, s := range t { + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + v = 
append(v, int(i)) + } + l.PreferredPreauthTypes = v + case "proxiable": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Proxiable = v + case "rdns": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.RDNS = v + case "realm_try_domains": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < -1 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.RealmTryDomains = int(v) + case "renew_lifetime": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.RenewLifetime = d + case "safe_checksum_type": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < 0 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.SafeChecksumType = int(v) + case "ticket_lifetime": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.TicketLifetime = d + case "udp_preference_limit": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseUint(p[1], 10, 32) + if err != nil || v > 32700 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.UDPPreferenceLimit = int(v) + case "verify_ap_req_nofail": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.VerifyAPReqNofail = v + } + } + l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto) + l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto) + l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto) + return nil +} + +// Realm represents an entry in the [realms] section of the configuration. 
+type Realm struct { + Realm string + AdminServer []string + //auth_to_local //Not implementing for now + //auth_to_local_names //Not implementing for now + DefaultDomain string + KDC []string + KPasswdServer []string //default admin_server:464 + MasterKDC []string +} + +// Parse the lines of a [realms] entry into the Realm struct. +func (r *Realm) parseLines(name string, lines []string) (err error) { + r.Realm = name + var adminServerFinal bool + var KDCFinal bool + var kpasswdServerFinal bool + var masterKDCFinal bool + var ignore bool + var c int // counts the depth of blocks within brackets { } + for _, line := range lines { + if ignore && c > 0 && !strings.Contains(line, "{") && !strings.Contains(line, "}") { + continue + } + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.Contains(line, "=") && !strings.Contains(line, "}") { + return InvalidErrorf("realms section line (%s)", line) + } + if strings.Contains(line, "v4_") { + ignore = true + err = UnsupportedDirective{"v4 configurations are not supported"} + } + if strings.Contains(line, "{") { + c++ + if ignore { + continue + } + } + if strings.Contains(line, "}") { + c-- + if c < 0 { + return InvalidErrorf("unpaired curly brackets") + } + if ignore { + if c < 1 { + c = 0 + ignore = false + } + continue + } + } + + p := strings.Split(line, "=") + key := strings.TrimSpace(strings.ToLower(p[0])) + v := strings.TrimSpace(p[1]) + switch key { + case "admin_server": + appendUntilFinal(&r.AdminServer, v, &adminServerFinal) + case "default_domain": + r.DefaultDomain = v + case "kdc": + if !strings.Contains(v, ":") { + // No port number specified default to 88 + if strings.HasSuffix(v, `*`) { + v = strings.TrimSpace(strings.TrimSuffix(v, `*`)) + ":88*" + } else { + v = strings.TrimSpace(v) + ":88" + } + } + appendUntilFinal(&r.KDC, v, &KDCFinal) + case "kpasswd_server": + 
appendUntilFinal(&r.KPasswdServer, v, &kpasswdServerFinal) + case "master_kdc": + appendUntilFinal(&r.MasterKDC, v, &masterKDCFinal) + } + } + //default for Kpasswd_server = admin_server:464 + if len(r.KPasswdServer) < 1 { + for _, a := range r.AdminServer { + s := strings.Split(a, ":") + r.KPasswdServer = append(r.KPasswdServer, s[0]+":464") + } + } + return +} + +// Parse the lines of the [realms] section of the configuration into an slice of Realm structs. +func parseRealms(lines []string) (realms []Realm, err error) { + var name string + var start int + var c int + for i, l := range lines { + //Remove comments after the values + if idx := strings.IndexAny(l, "#;"); idx != -1 { + l = l[:idx] + } + l = strings.TrimSpace(l) + if l == "" { + continue + } + //if strings.Contains(l, "v4_") { + // return nil, errors.New("v4 configurations are not supported in Realms section") + //} + if strings.Contains(l, "{") { + c++ + if !strings.Contains(l, "=") { + return nil, fmt.Errorf("realm configuration line invalid: %s", l) + } + if c == 1 { + start = i + p := strings.Split(l, "=") + name = strings.TrimSpace(p[0]) + } + } + if strings.Contains(l, "}") { + if c < 1 { + // but not started a block!!! + return nil, errors.New("invalid Realms section in configuration") + } + c-- + if c == 0 { + var r Realm + e := r.parseLines(name, lines[start+1:i]) + if e != nil { + if _, ok := e.(UnsupportedDirective); !ok { + err = e + return + } + err = e + } + realms = append(realms, r) + } + } + } + return +} + +// DomainRealm maps the domains to realms representing the [domain_realm] section of the configuration. +type DomainRealm map[string]string + +// Parse the lines of the [domain_realm] section of the configuration and add to the mapping. 
+func (d *DomainRealm) parseLines(lines []string) error { + for _, line := range lines { + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + if strings.TrimSpace(line) == "" { + continue + } + if !strings.Contains(line, "=") { + return InvalidErrorf("realm line (%s)", line) + } + p := strings.Split(line, "=") + domain := strings.TrimSpace(strings.ToLower(p[0])) + realm := strings.TrimSpace(p[1]) + d.addMapping(domain, realm) + } + return nil +} + +// Add a domain to realm mapping. +func (d *DomainRealm) addMapping(domain, realm string) { + (*d)[domain] = realm +} + +// Delete a domain to realm mapping. +func (d *DomainRealm) deleteMapping(domain, realm string) { + delete(*d, domain) +} + +// ResolveRealm resolves the kerberos realm for the specified domain name from the domain to realm mapping. +// The most specific mapping is returned. +func (c *Config) ResolveRealm(domainName string) string { + domainName = strings.TrimSuffix(domainName, ".") + + // Try to match the entire hostname first + if r, ok := c.DomainRealm[domainName]; ok { + return r + } + + // Try to match all DNS domain parts + periods := strings.Count(domainName, ".") + 1 + for i := 2; i <= periods; i++ { + z := strings.SplitN(domainName, ".", i) + if r, ok := c.DomainRealm["."+z[len(z)-1]]; ok { + return r + } + } + return c.LibDefaults.DefaultRealm +} + +// Load the KRB5 configuration from the specified file path. +func Load(cfgPath string) (*Config, error) { + fh, err := os.Open(cfgPath) + if err != nil { + return nil, errors.New("configuration file could not be opened: " + cfgPath + " " + err.Error()) + } + defer fh.Close() + scanner := bufio.NewScanner(fh) + return NewFromScanner(scanner) +} + +// NewFromString creates a new Config struct from a string. 
+func NewFromString(s string) (*Config, error) { + reader := strings.NewReader(s) + return NewFromReader(reader) +} + +// NewFromReader creates a new Config struct from an io.Reader. +func NewFromReader(r io.Reader) (*Config, error) { + scanner := bufio.NewScanner(r) + return NewFromScanner(scanner) +} + +// NewFromScanner creates a new Config struct from a bufio.Scanner. +func NewFromScanner(scanner *bufio.Scanner) (*Config, error) { + c := New() + var e error + sections := make(map[int]string) + var sectionLineNum []int + var lines []string + for scanner.Scan() { + // Skip comments and blank lines + if matched, _ := regexp.MatchString(`^\s*(#|;|\n)`, scanner.Text()); matched { + continue + } + if matched, _ := regexp.MatchString(`^\s*\[libdefaults\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "libdefaults" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[realms\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "realms" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[domain_realm\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "domain_realm" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[.*\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "unknown_section" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + lines = append(lines, scanner.Text()) + } + for i, start := range sectionLineNum { + var end int + if i+1 >= len(sectionLineNum) { + end = len(lines) + } else { + end = sectionLineNum[i+1] + } + switch section := sections[start]; section { + case "libdefaults": + err := c.LibDefaults.parseLines(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing libdefaults section: %v", err) + } + e = err + } + case "realms": + realms, err := 
parseRealms(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing realms section: %v", err) + } + e = err + } + c.Realms = realms + case "domain_realm": + err := c.DomainRealm.parseLines(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing domaain_realm section: %v", err) + } + e = err + } + } + } + return c, e +} + +// Parse a space delimited list of ETypes into a list of EType numbers optionally filtering out weak ETypes. +func parseETypes(s []string, w bool) []int32 { + var eti []int32 + for _, et := range s { + if !w { + var weak bool + for _, wet := range strings.Fields(WeakETypeList) { + if et == wet { + weak = true + break + } + } + if weak { + continue + } + } + i := etypeID.EtypeSupported(et) + if i != 0 { + eti = append(eti, i) + } + } + return eti +} + +// Parse a time duration string in the configuration to a golang time.Duration. 
+func parseDuration(s string) (time.Duration, error) { + s = strings.Replace(strings.TrimSpace(s), " ", "", -1) + + // handle Nd[NmNs] + if strings.Contains(s, "d") { + ds := strings.SplitN(s, "d", 2) + dn, err := strconv.ParseUint(ds[0], 10, 32) + if err != nil { + return time.Duration(0), errors.New("invalid time duration") + } + d := time.Duration(dn*24) * time.Hour + if ds[1] != "" { + dp, err := time.ParseDuration(ds[1]) + if err != nil { + return time.Duration(0), errors.New("invalid time duration") + } + d = d + dp + } + return d, nil + } + + // handle Nm[Ns] + d, err := time.ParseDuration(s) + if err == nil { + return d, nil + } + + // handle N + v, err := strconv.ParseUint(s, 10, 32) + if err == nil && v > 0 { + return time.Duration(v) * time.Second, nil + } + + // handle h:m[:s] + if strings.Contains(s, ":") { + t := strings.Split(s, ":") + if 2 > len(t) || len(t) > 3 { + return time.Duration(0), errors.New("invalid time duration value") + } + var i []int + for _, n := range t { + j, err := strconv.ParseInt(n, 10, 16) + if err != nil { + return time.Duration(0), errors.New("invalid time duration value") + } + i = append(i, int(j)) + } + d := time.Duration(i[0])*time.Hour + time.Duration(i[1])*time.Minute + if len(i) == 3 { + d = d + time.Duration(i[2])*time.Second + } + return d, nil + } + return time.Duration(0), errors.New("invalid time duration value") +} + +// Parse possible boolean values to golang bool. +func parseBoolean(s string) (bool, error) { + s = strings.TrimSpace(s) + v, err := strconv.ParseBool(s) + if err == nil { + return v, nil + } + switch strings.ToLower(s) { + case "yes": + return true, nil + case "y": + return true, nil + case "no": + return false, nil + case "n": + return false, nil + } + return false, errors.New("invalid boolean value") +} + +// Parse array of strings but stop if an asterisk is placed at the end of a line. 
+func appendUntilFinal(s *[]string, value string, final *bool) { + if *final { + return + } + if last := len(value) - 1; last >= 0 && value[last] == '*' { + *final = true + value = value[:len(value)-1] + } + *s = append(*s, value) +} + +// JSON return details of the config in a JSON format. +func (c *Config) JSON() (string, error) { + b, err := json.MarshalIndent(c, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go new file mode 100644 index 00000000..c3b35c77 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go @@ -0,0 +1,333 @@ +package credentials + +import ( + "bytes" + "encoding/binary" + "errors" + "io/ioutil" + "strings" + "time" + "unsafe" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + headerFieldTagKDCOffset = 1 +) + +// CCache is the file credentials cache as define here: https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html +type CCache struct { + Version uint8 + Header header + DefaultPrincipal principal + Credentials []*Credential + Path string +} + +type header struct { + length uint16 + fields []headerField +} + +type headerField struct { + tag uint16 + length uint16 + value []byte +} + +// Credential cache entry principal struct. +type principal struct { + Realm string + PrincipalName types.PrincipalName +} + +// Credential holds a Kerberos client's ccache credential information. +type Credential struct { + Client principal + Server principal + Key types.EncryptionKey + AuthTime time.Time + StartTime time.Time + EndTime time.Time + RenewTill time.Time + IsSKey bool + TicketFlags asn1.BitString + Addresses []types.HostAddress + AuthData []types.AuthorizationDataEntry + Ticket []byte + SecondTicket []byte +} + +// LoadCCache loads a credential cache file into a CCache type. 
+func LoadCCache(cpath string) (*CCache, error) { + c := new(CCache) + b, err := ioutil.ReadFile(cpath) + if err != nil { + return c, err + } + err = c.Unmarshal(b) + return c, err +} + +// Unmarshal a byte slice of credential cache data into CCache type. +func (c *CCache) Unmarshal(b []byte) error { + p := 0 + //The first byte of the file always has the value 5 + if int8(b[p]) != 5 { + return errors.New("Invalid credential cache data. First byte does not equal 5") + } + p++ + //Get credential cache version + //The second byte contains the version number (1 to 4) + c.Version = b[p] + if c.Version < 1 || c.Version > 4 { + return errors.New("Invalid credential cache data. Keytab version is not within 1 to 4") + } + p++ + //Version 1 or 2 of the file format uses native byte order for integer representations. Versions 3 & 4 always uses big-endian byte order + var endian binary.ByteOrder + endian = binary.BigEndian + if (c.Version == 1 || c.Version == 2) && isNativeEndianLittle() { + endian = binary.LittleEndian + } + if c.Version == 4 { + err := parseHeader(b, &p, c, &endian) + if err != nil { + return err + } + } + c.DefaultPrincipal = parsePrincipal(b, &p, c, &endian) + for p < len(b) { + cred, err := parseCredential(b, &p, c, &endian) + if err != nil { + return err + } + c.Credentials = append(c.Credentials, cred) + } + return nil +} + +func parseHeader(b []byte, p *int, c *CCache, e *binary.ByteOrder) error { + if c.Version != 4 { + return errors.New("Credentials cache version is not 4 so there is no header to parse.") + } + h := header{} + h.length = uint16(readInt16(b, p, e)) + for *p <= int(h.length) { + f := headerField{} + f.tag = uint16(readInt16(b, p, e)) + f.length = uint16(readInt16(b, p, e)) + f.value = b[*p : *p+int(f.length)] + *p += int(f.length) + if !f.valid() { + return errors.New("Invalid credential cache header found") + } + h.fields = append(h.fields, f) + } + c.Header = h + return nil +} + +// Parse the Keytab bytes of a principal into a Keytab 
entry's principal. +func parsePrincipal(b []byte, p *int, c *CCache, e *binary.ByteOrder) (princ principal) { + if c.Version != 1 { + //Name Type is omitted in version 1 + princ.PrincipalName.NameType = readInt32(b, p, e) + } + nc := int(readInt32(b, p, e)) + if c.Version == 1 { + //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 + nc-- + } + lenRealm := readInt32(b, p, e) + princ.Realm = string(readBytes(b, p, int(lenRealm), e)) + for i := 0; i < nc; i++ { + l := readInt32(b, p, e) + princ.PrincipalName.NameString = append(princ.PrincipalName.NameString, string(readBytes(b, p, int(l), e))) + } + return princ +} + +func parseCredential(b []byte, p *int, c *CCache, e *binary.ByteOrder) (cred *Credential, err error) { + cred = new(Credential) + cred.Client = parsePrincipal(b, p, c, e) + cred.Server = parsePrincipal(b, p, c, e) + key := types.EncryptionKey{} + key.KeyType = int32(readInt16(b, p, e)) + if c.Version == 3 { + //repeated twice in version 3 + key.KeyType = int32(readInt16(b, p, e)) + } + key.KeyValue = readData(b, p, e) + cred.Key = key + cred.AuthTime = readTimestamp(b, p, e) + cred.StartTime = readTimestamp(b, p, e) + cred.EndTime = readTimestamp(b, p, e) + cred.RenewTill = readTimestamp(b, p, e) + if ik := readInt8(b, p, e); ik == 0 { + cred.IsSKey = false + } else { + cred.IsSKey = true + } + cred.TicketFlags = types.NewKrbFlags() + cred.TicketFlags.Bytes = readBytes(b, p, 4, e) + l := int(readInt32(b, p, e)) + cred.Addresses = make([]types.HostAddress, l, l) + for i := range cred.Addresses { + cred.Addresses[i] = readAddress(b, p, e) + } + l = int(readInt32(b, p, e)) + cred.AuthData = make([]types.AuthorizationDataEntry, l, l) + for i := range cred.AuthData { + cred.AuthData[i] = readAuthDataEntry(b, p, e) + } + cred.Ticket = readData(b, p, e) + cred.SecondTicket = readData(b, p, e) + return +} + +// GetClientPrincipalName returns a PrincipalName type for the client the credentials cache is for. 
+func (c *CCache) GetClientPrincipalName() types.PrincipalName { + return c.DefaultPrincipal.PrincipalName + } + + // GetClientRealm returns the realm of the client the credentials cache is for. + func (c *CCache) GetClientRealm() string { + return c.DefaultPrincipal.Realm + } + + // GetClientCredentials returns a Credentials object representing the client of the credentials cache. + func (c *CCache) GetClientCredentials() *Credentials { + return &Credentials{ + username: c.DefaultPrincipal.PrincipalName.PrincipalNameString(), + realm: c.GetClientRealm(), + cname: c.DefaultPrincipal.PrincipalName, + } + } + + // Contains tests if the cache contains a credential for the provided server PrincipalName + func (c *CCache) Contains(p types.PrincipalName) bool { + for _, cred := range c.Credentials { + if cred.Server.PrincipalName.Equal(p) { + return true + } + } + return false + } + + // GetEntry returns a specific credential for the PrincipalName provided. + func (c *CCache) GetEntry(p types.PrincipalName) (*Credential, bool) { + cred := new(Credential) + var found bool + for i := range c.Credentials { + if c.Credentials[i].Server.PrincipalName.Equal(p) { + cred = c.Credentials[i] + found = true + break + } + } + if !found { + return cred, false + } + return cred, true + } + + // GetEntries filters out configuration entries and returns a slice of credentials.
+func (c *CCache) GetEntries() []*Credential { + creds := make([]*Credential, 0) + for _, cred := range c.Credentials { + // Filter out configuration entries + if strings.HasPrefix(cred.Server.Realm, "X-CACHECONF") { + continue + } + creds = append(creds, cred) + } + return creds +} + +func (h *headerField) valid() bool { + // See https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html - Header format + switch h.tag { + case headerFieldTagKDCOffset: + if h.length != 8 || len(h.value) != 8 { + return false + } + return true + } + return false +} + +func readData(b []byte, p *int, e *binary.ByteOrder) []byte { + l := readInt32(b, p, e) + return readBytes(b, p, int(l), e) +} + +func readAddress(b []byte, p *int, e *binary.ByteOrder) types.HostAddress { + a := types.HostAddress{} + a.AddrType = int32(readInt16(b, p, e)) + a.Address = readData(b, p, e) + return a +} + +func readAuthDataEntry(b []byte, p *int, e *binary.ByteOrder) types.AuthorizationDataEntry { + a := types.AuthorizationDataEntry{} + a.ADType = int32(readInt16(b, p, e)) + a.ADData = readData(b, p, e) + return a +} + +// Read bytes representing a timestamp. +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time { + return time.Unix(int64(readInt32(b, p, e)), 0) +} + +// Read bytes representing an eight bit integer. +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { + buf := bytes.NewBuffer(b[*p : *p+1]) + binary.Read(buf, *e, &i) + *p++ + return +} + +// Read bytes representing a sixteen bit integer. +func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { + buf := bytes.NewBuffer(b[*p : *p+2]) + binary.Read(buf, *e, &i) + *p += 2 + return +} + +// Read bytes representing a thirty two bit integer. 
+func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) { + buf := bytes.NewBuffer(b[*p : *p+4]) + binary.Read(buf, *e, &i) + *p += 4 + return +} + +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte { + buf := bytes.NewBuffer(b[*p : *p+s]) + r := make([]byte, s) + binary.Read(buf, *e, &r) + *p += s + return r +} + +func isNativeEndianLittle() bool { + var x = 0x012345678 + var p = unsafe.Pointer(&x) + var bp = (*[4]byte)(p) + + var endian bool + if 0x01 == bp[0] { + endian = false + } else if (0x78 & 0xff) == (bp[0] & 0xff) { + endian = true + } else { + // Default to big endian + endian = false + } + return endian +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go new file mode 100644 index 00000000..bddbc7e3 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go @@ -0,0 +1,405 @@ +// Package credentials provides credentials management for Kerberos 5 authentication. +package credentials + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + // AttributeKeyADCredentials assigned number for AD credentials. + AttributeKeyADCredentials = "gokrb5AttributeKeyADCredentials" +) + +// Credentials struct for a user. +// Contains either a keytab, password or both. +// Keytabs are used over passwords if both are defined. 
+type Credentials struct { + username string + displayName string + realm string + cname types.PrincipalName + keytab *keytab.Keytab + password string + attributes map[string]interface{} + validUntil time.Time + authenticated bool + human bool + authTime time.Time + groupMembership map[string]bool + sessionID string +} + +// marshalCredentials is used to enable marshaling and unmarshaling of credentials +// without having exported fields on the Credentials struct +type marshalCredentials struct { + Username string + DisplayName string + Realm string + CName types.PrincipalName `json:"-"` + Keytab bool + Password bool + Attributes map[string]interface{} `json:"-"` + ValidUntil time.Time + Authenticated bool + Human bool + AuthTime time.Time + GroupMembership map[string]bool `json:"-"` + SessionID string +} + +// ADCredentials contains information obtained from the PAC. +type ADCredentials struct { + EffectiveName string + FullName string + UserID int + PrimaryGroupID int + LogOnTime time.Time + LogOffTime time.Time + PasswordLastSet time.Time + GroupMembershipSIDs []string + LogonDomainName string + LogonDomainID string + LogonServer string +} + +// New creates a new Credentials instance. +func New(username string, realm string) *Credentials { + uid, err := uuid.GenerateUUID() + if err != nil { + uid = "00unique-sess-ions-uuid-unavailable0" + } + return &Credentials{ + username: username, + displayName: username, + realm: realm, + cname: types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, username), + keytab: keytab.New(), + attributes: make(map[string]interface{}), + groupMembership: make(map[string]bool), + sessionID: uid, + human: true, + } +} + +// NewFromPrincipalName creates a new Credentials instance with the user details provides as a PrincipalName type. 
+func NewFromPrincipalName(cname types.PrincipalName, realm string) *Credentials { + c := New(cname.PrincipalNameString(), realm) + c.cname = cname + return c +} + +// WithKeytab sets the Keytab in the Credentials struct. +func (c *Credentials) WithKeytab(kt *keytab.Keytab) *Credentials { + c.keytab = kt + c.password = "" + return c +} + +// Keytab returns the credential's Keytab. +func (c *Credentials) Keytab() *keytab.Keytab { + return c.keytab +} + +// HasKeytab queries if the Credentials has a keytab defined. +func (c *Credentials) HasKeytab() bool { + if c.keytab != nil && len(c.keytab.Entries) > 0 { + return true + } + return false +} + +// WithPassword sets the password in the Credentials struct. +func (c *Credentials) WithPassword(password string) *Credentials { + c.password = password + c.keytab = keytab.New() // clear any keytab + return c +} + +// Password returns the credential's password. +func (c *Credentials) Password() string { + return c.password +} + +// HasPassword queries if the Credentials has a password defined. 
+func (c *Credentials) HasPassword() bool { + if c.password != "" { + return true + } + return false +} + +// SetValidUntil sets the expiry time of the credentials +func (c *Credentials) SetValidUntil(t time.Time) { + c.validUntil = t +} + +// SetADCredentials adds ADCredentials attributes to the credentials +func (c *Credentials) SetADCredentials(a ADCredentials) { + c.SetAttribute(AttributeKeyADCredentials, a) + if a.FullName != "" { + c.SetDisplayName(a.FullName) + } + if a.EffectiveName != "" { + c.SetUserName(a.EffectiveName) + } + for i := range a.GroupMembershipSIDs { + c.AddAuthzAttribute(a.GroupMembershipSIDs[i]) + } +} + +// GetADCredentials returns ADCredentials attributes sorted in the credential +func (c *Credentials) GetADCredentials() ADCredentials { + if a, ok := c.attributes[AttributeKeyADCredentials].(ADCredentials); ok { + return a + } + return ADCredentials{} +} + +// Methods to implement goidentity.Identity interface + +// UserName returns the credential's username. +func (c *Credentials) UserName() string { + return c.username +} + +// SetUserName sets the username value on the credential. +func (c *Credentials) SetUserName(s string) { + c.username = s +} + +// CName returns the credential's client principal name. +func (c *Credentials) CName() types.PrincipalName { + return c.cname +} + +// SetCName sets the client principal name on the credential. +func (c *Credentials) SetCName(pn types.PrincipalName) { + c.cname = pn +} + +// Domain returns the credential's domain. +func (c *Credentials) Domain() string { + return c.realm +} + +// SetDomain sets the domain value on the credential. +func (c *Credentials) SetDomain(s string) { + c.realm = s +} + +// Realm returns the credential's realm. Same as the domain. +func (c *Credentials) Realm() string { + return c.Domain() +} + +// SetRealm sets the realm value on the credential. 
Same as the domain +func (c *Credentials) SetRealm(s string) { + c.SetDomain(s) +} + +// DisplayName returns the credential's display name. +func (c *Credentials) DisplayName() string { + return c.displayName +} + +// SetDisplayName sets the display name value on the credential. +func (c *Credentials) SetDisplayName(s string) { + c.displayName = s +} + +// Human returns if the credential represents a human or not. +func (c *Credentials) Human() bool { + return c.human +} + +// SetHuman sets the credential as human. +func (c *Credentials) SetHuman(b bool) { + c.human = b +} + +// AuthTime returns the time the credential was authenticated. +func (c *Credentials) AuthTime() time.Time { + return c.authTime +} + +// SetAuthTime sets the time the credential was authenticated. +func (c *Credentials) SetAuthTime(t time.Time) { + c.authTime = t +} + +// AuthzAttributes returns the credentials authorizing attributes. +func (c *Credentials) AuthzAttributes() []string { + s := make([]string, len(c.groupMembership)) + i := 0 + for a := range c.groupMembership { + s[i] = a + i++ + } + return s +} + +// Authenticated indicates if the credential has been successfully authenticated or not. +func (c *Credentials) Authenticated() bool { + return c.authenticated +} + +// SetAuthenticated sets the credential as having been successfully authenticated. +func (c *Credentials) SetAuthenticated(b bool) { + c.authenticated = b +} + +// AddAuthzAttribute adds an authorization attribute to the credential. +func (c *Credentials) AddAuthzAttribute(a string) { + c.groupMembership[a] = true +} + +// RemoveAuthzAttribute removes an authorization attribute from the credential. +func (c *Credentials) RemoveAuthzAttribute(a string) { + if _, ok := c.groupMembership[a]; !ok { + return + } + delete(c.groupMembership, a) +} + +// EnableAuthzAttribute toggles an authorization attribute to an enabled state on the credential. 
+func (c *Credentials) EnableAuthzAttribute(a string) { + if enabled, ok := c.groupMembership[a]; ok && !enabled { + c.groupMembership[a] = true + } +} + +// DisableAuthzAttribute toggles an authorization attribute to a disabled state on the credential. +func (c *Credentials) DisableAuthzAttribute(a string) { + if enabled, ok := c.groupMembership[a]; ok && enabled { + c.groupMembership[a] = false + } +} + +// Authorized indicates if the credential has the specified authorizing attribute. +func (c *Credentials) Authorized(a string) bool { + if enabled, ok := c.groupMembership[a]; ok && enabled { + return true + } + return false +} + +// SessionID returns the credential's session ID. +func (c *Credentials) SessionID() string { + return c.sessionID +} + +// Expired indicates if the credential has expired. +func (c *Credentials) Expired() bool { + if !c.validUntil.IsZero() && time.Now().UTC().After(c.validUntil) { + return true + } + return false +} + +// ValidUntil returns the credential's valid until date +func (c *Credentials) ValidUntil() time.Time { + return c.validUntil +} + +// Attributes returns the Credentials' attributes map. +func (c *Credentials) Attributes() map[string]interface{} { + return c.attributes +} + +// SetAttribute sets the value of an attribute. +func (c *Credentials) SetAttribute(k string, v interface{}) { + c.attributes[k] = v +} + +// SetAttributes replaces the attributes map with the one provided. +func (c *Credentials) SetAttributes(a map[string]interface{}) { + c.attributes = a +} + +// RemoveAttribute deletes an attribute from the attribute map that has the key provided. 
+func (c *Credentials) RemoveAttribute(k string) { + delete(c.attributes, k) +} + +// Marshal the Credentials into a byte slice +func (c *Credentials) Marshal() ([]byte, error) { + gob.Register(map[string]interface{}{}) + gob.Register(ADCredentials{}) + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + mc := marshalCredentials{ + Username: c.username, + DisplayName: c.displayName, + Realm: c.realm, + CName: c.cname, + Keytab: c.HasKeytab(), + Password: c.HasPassword(), + Attributes: c.attributes, + ValidUntil: c.validUntil, + Authenticated: c.authenticated, + Human: c.human, + AuthTime: c.authTime, + GroupMembership: c.groupMembership, + SessionID: c.sessionID, + } + err := enc.Encode(&mc) + if err != nil { + return []byte{}, err + } + return buf.Bytes(), nil +} + +// Unmarshal a byte slice into Credentials +func (c *Credentials) Unmarshal(b []byte) error { + gob.Register(map[string]interface{}{}) + gob.Register(ADCredentials{}) + mc := new(marshalCredentials) + buf := bytes.NewBuffer(b) + dec := gob.NewDecoder(buf) + err := dec.Decode(mc) + if err != nil { + return err + } + c.username = mc.Username + c.displayName = mc.DisplayName + c.realm = mc.Realm + c.cname = mc.CName + c.attributes = mc.Attributes + c.validUntil = mc.ValidUntil + c.authenticated = mc.Authenticated + c.human = mc.Human + c.authTime = mc.AuthTime + c.groupMembership = mc.GroupMembership + c.sessionID = mc.SessionID + return nil +} + +// JSON return details of the Credentials in a JSON format. 
+func (c *Credentials) JSON() (string, error) { + mc := marshalCredentials{ + Username: c.username, + DisplayName: c.displayName, + Realm: c.realm, + CName: c.cname, + Keytab: c.HasKeytab(), + Password: c.HasPassword(), + ValidUntil: c.validUntil, + Authenticated: c.authenticated, + Human: c.human, + AuthTime: c.authTime, + SessionID: c.sessionID, + } + b, err := json.MarshalIndent(mc, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go new file mode 100644 index 00000000..dd8babd5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go @@ -0,0 +1,129 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha1" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3962" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC 3962 + +// Aes128CtsHmacSha96 implements Kerberos encryption type aes128-cts-hmac-sha1-96 +type Aes128CtsHmacSha96 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes128CtsHmacSha96) GetETypeID() int32 { + return etypeID.AES128_CTS_HMAC_SHA1_96 +} + +// GetHashID returns the checksum type ID number. +func (e Aes128CtsHmacSha96) GetHashID() int32 { + return chksumtype.HMAC_SHA1_96_AES128 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes128CtsHmacSha96) GetKeyByteSize() int { + return 128 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes128CtsHmacSha96) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. 
+func (e Aes128CtsHmacSha96) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes128CtsHmacSha96) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes128CtsHmacSha96) GetDefaultStringToKeyParams() string { + return "00001000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes128CtsHmacSha96) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes128CtsHmacSha96) GetHMACBitLength() int { + return 96 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes128CtsHmacSha96) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes128CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc3962.StringToKey(secret, salt, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes128CtsHmacSha96) RandomToKey(b []byte) []byte { + return rfc3961.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes128CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3962.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes128CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3962.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. 
+func (e Aes128CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { + return rfc3962.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes128CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3962.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes128CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveKey(protocolKey, usage, e) +} + +// DeriveRandom generates data needed for key generation. +func (e Aes128CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Aes128CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes128CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes128CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go new file mode 100644 index 00000000..b05af7d3 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go @@ -0,0 +1,132 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha256" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc8009" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC https://tools.ietf.org/html/rfc8009 + +// Aes128CtsHmacSha256128 implements Kerberos encryption type aes128-cts-hmac-sha256-128 +type Aes128CtsHmacSha256128 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes128CtsHmacSha256128) GetETypeID() int32 { + return etypeID.AES128_CTS_HMAC_SHA256_128 +} + +// GetHashID returns the checksum type ID number. +func (e Aes128CtsHmacSha256128) GetHashID() int32 { + return chksumtype.HMAC_SHA256_128_AES128 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes128CtsHmacSha256128) GetKeyByteSize() int { + return 128 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes128CtsHmacSha256128) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash { + return sha256.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. 
+func (e Aes128CtsHmacSha256128) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes128CtsHmacSha256128) GetDefaultStringToKeyParams() string { + return "00008000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes128CtsHmacSha256128) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes128CtsHmacSha256128) GetHMACBitLength() int { + return 128 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes128CtsHmacSha256128) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes128CtsHmacSha256128) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + saltp := rfc8009.GetSaltP(salt, "aes128-cts-hmac-sha256-128") + return rfc8009.StringToKey(secret, saltp, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte { + return rfc8009.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes128CtsHmacSha256128) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc8009.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes128CtsHmacSha256128) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc8009.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) { + return rfc8009.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. 
+func (e Aes128CtsHmacSha256128) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc8009.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes128CtsHmacSha256128) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveKey(protocolKey, usage, e), nil +} + +// DeriveRandom generates data needed for key generation. +func (e Aes128CtsHmacSha256128) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the ciphertext message. +// As the hash is calculated over the iv concatenated with the AES cipher output not the plaintext the pt value to this +// interface method is not use. Pass any []byte. +func (e Aes128CtsHmacSha256128) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + // We don't need ib just there for the interface + return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes128CtsHmacSha256128) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes128CtsHmacSha256128) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go new file mode 100644 index 00000000..45e439a4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go @@ -0,0 +1,129 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha1" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3962" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC 3962 + +// Aes256CtsHmacSha96 implements Kerberos encryption type aes256-cts-hmac-sha1-96 +type Aes256CtsHmacSha96 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes256CtsHmacSha96) GetETypeID() int32 { + return etypeID.AES256_CTS_HMAC_SHA1_96 +} + +// GetHashID returns the checksum type ID number. +func (e Aes256CtsHmacSha96) GetHashID() int32 { + return chksumtype.HMAC_SHA1_96_AES256 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes256CtsHmacSha96) GetKeyByteSize() int { + return 256 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes256CtsHmacSha96) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes256CtsHmacSha96) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. 
+func (e Aes256CtsHmacSha96) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes256CtsHmacSha96) GetDefaultStringToKeyParams() string { + return "00001000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes256CtsHmacSha96) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes256CtsHmacSha96) GetHMACBitLength() int { + return 96 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes256CtsHmacSha96) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes256CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc3962.StringToKey(secret, salt, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes256CtsHmacSha96) RandomToKey(b []byte) []byte { + return rfc3961.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes256CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3962.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes256CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3962.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes256CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { + return rfc3962.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. 
+func (e Aes256CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3962.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes256CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveKey(protocolKey, usage, e) +} + +// DeriveRandom generates data needed for key generation. +func (e Aes256CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Aes256CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes256CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes256CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go new file mode 100644 index 00000000..6a544759 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go @@ -0,0 +1,132 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha512" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc8009" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// RFC https://tools.ietf.org/html/rfc8009 + +// Aes256CtsHmacSha384192 implements Kerberos encryption type aes256-cts-hmac-sha384-192 +type Aes256CtsHmacSha384192 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes256CtsHmacSha384192) GetETypeID() int32 { + return etypeID.AES256_CTS_HMAC_SHA384_192 +} + +// GetHashID returns the checksum type ID number. +func (e Aes256CtsHmacSha384192) GetHashID() int32 { + return chksumtype.HMAC_SHA384_192_AES256 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes256CtsHmacSha384192) GetKeyByteSize() int { + return 192 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes256CtsHmacSha384192) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes256CtsHmacSha384192) GetHashFunc() func() hash.Hash { + return sha512.New384 +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. 
+func (e Aes256CtsHmacSha384192) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes256CtsHmacSha384192) GetDefaultStringToKeyParams() string { + return "00008000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes256CtsHmacSha384192) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes256CtsHmacSha384192) GetHMACBitLength() int { + return 192 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes256CtsHmacSha384192) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes256CtsHmacSha384192) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + saltp := rfc8009.GetSaltP(salt, "aes256-cts-hmac-sha384-192") + return rfc8009.StringToKey(secret, saltp, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes256CtsHmacSha384192) RandomToKey(b []byte) []byte { + return rfc8009.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes256CtsHmacSha384192) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc8009.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes256CtsHmacSha384192) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc8009.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes256CtsHmacSha384192) DecryptData(key, data []byte) ([]byte, error) { + return rfc8009.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. 
+func (e Aes256CtsHmacSha384192) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc8009.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes256CtsHmacSha384192) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveKey(protocolKey, usage, e), nil +} + +// DeriveRandom generates data needed for key generation. +func (e Aes256CtsHmacSha384192) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the ciphertext message. +// As the hash is calculated over the iv concatenated with the AES cipher output not the plaintext the pt value to this +// interface method is not use. Pass any []byte. +func (e Aes256CtsHmacSha384192) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + // We don't need ib just there for the interface + return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes256CtsHmacSha384192) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes256CtsHmacSha384192) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go new file mode 100644 index 00000000..dab55be7 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go @@ -0,0 +1,132 @@ +// Package common provides encryption methods common across encryption types +package common + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// ZeroPad pads bytes with zeros to nearest multiple of message size m. +func ZeroPad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + if l := len(b) % m; l != 0 { + n := m - l + z := make([]byte, n) + b = append(b, z...) + } + return b, nil +} + +// PKCS7Pad pads bytes according to RFC 2315 to nearest multiple of message size m. +func PKCS7Pad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + n := m - (len(b) % m) + pb := make([]byte, len(b)+n) + copy(pb, b) + copy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n)) + return pb, nil +} + +// PKCS7Unpad removes RFC 2315 padding from byes where message size is m. 
+func PKCS7Unpad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("invalid message block size when unpadding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("padded data not valid: Zero size") + } + if len(b)%m != 0 { + return nil, errors.New("padded data not valid: Not multiple of message block size") + } + c := b[len(b)-1] + n := int(c) + if n == 0 || n > len(b) { + return nil, errors.New("padded data not valid: Data may not have been padded") + } + for i := 0; i < n; i++ { + if b[len(b)-n+i] != c { + return nil, errors.New("padded data not valid") + } + } + return b[:len(b)-n], nil +} + +// GetHash generates the keyed hash value according to the etype's hash function. +func GetHash(pt, key []byte, usage []byte, etype etype.EType) ([]byte, error) { + k, err := etype.DeriveKey(key, usage) + if err != nil { + return nil, fmt.Errorf("unable to derive key for checksum: %v", err) + } + mac := hmac.New(etype.GetHashFunc(), k) + p := make([]byte, len(pt)) + copy(p, pt) + mac.Write(p) + return mac.Sum(nil)[:etype.GetHMACBitLength()/8], nil +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func GetChecksumHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) { + return GetHash(b, key, GetUsageKc(usage), etype) +} + +// GetIntegrityHash returns a keyed integrity hash of the bytes provided. +func GetIntegrityHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) { + return GetHash(b, key, GetUsageKi(usage), etype) +} + +// VerifyChecksum compares the checksum of the msg bytes is the same as the checksum provided. +func VerifyChecksum(key, chksum, msg []byte, usage uint32, etype etype.EType) bool { + //The encrypted message is a concatenation of the encrypted output and the hash HMAC. + expectedMAC, _ := GetChecksumHash(msg, key, usage, etype) + return hmac.Equal(chksum, expectedMAC) +} + +// GetUsageKc returns the checksum key usage value for the usage number un. 
+// +// See RFC 3961 5.3 key-derivation function definition. +func GetUsageKc(un uint32) []byte { + return getUsage(un, 0x99) +} + +// GetUsageKe returns the encryption key usage value for the usage number un +// +// See RFC 3961 5.3 key-derivation function definition. +func GetUsageKe(un uint32) []byte { + return getUsage(un, 0xAA) +} + +// GetUsageKi returns the integrity key usage value for the usage number un +// +// See RFC 3961 5.3 key-derivation function definition. +func GetUsageKi(un uint32) []byte { + return getUsage(un, 0x55) +} + +func getUsage(un uint32, o byte) []byte { + var buf bytes.Buffer + binary.Write(&buf, binary.BigEndian, un) + return append(buf.Bytes(), o) +} + +// IterationsToS2Kparams converts the number of iterations as an integer to a string representation. +func IterationsToS2Kparams(i uint32) string { + b := make([]byte, 4, 4) + binary.BigEndian.PutUint32(b, i) + return hex.EncodeToString(b) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go new file mode 100644 index 00000000..5c96ddfd --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go @@ -0,0 +1,175 @@ +// Package crypto implements cryptographic functions for Kerberos 5 implementation. +package crypto + +import ( + "encoding/hex" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/types" +) + +// GetEtype returns an instances of the required etype struct for the etype ID. 
+func GetEtype(id int32) (etype.EType, error) { + switch id { + case etypeID.AES128_CTS_HMAC_SHA1_96: + var et Aes128CtsHmacSha96 + return et, nil + case etypeID.AES256_CTS_HMAC_SHA1_96: + var et Aes256CtsHmacSha96 + return et, nil + case etypeID.AES128_CTS_HMAC_SHA256_128: + var et Aes128CtsHmacSha256128 + return et, nil + case etypeID.AES256_CTS_HMAC_SHA384_192: + var et Aes256CtsHmacSha384192 + return et, nil + case etypeID.DES3_CBC_SHA1_KD: + var et Des3CbcSha1Kd + return et, nil + case etypeID.RC4_HMAC: + var et RC4HMAC + return et, nil + default: + return nil, fmt.Errorf("unknown or unsupported EType: %d", id) + } +} + +// GetChksumEtype returns an instances of the required etype struct for the checksum ID. +func GetChksumEtype(id int32) (etype.EType, error) { + switch id { + case chksumtype.HMAC_SHA1_96_AES128: + var et Aes128CtsHmacSha96 + return et, nil + case chksumtype.HMAC_SHA1_96_AES256: + var et Aes256CtsHmacSha96 + return et, nil + case chksumtype.HMAC_SHA256_128_AES128: + var et Aes128CtsHmacSha256128 + return et, nil + case chksumtype.HMAC_SHA384_192_AES256: + var et Aes256CtsHmacSha384192 + return et, nil + case chksumtype.HMAC_SHA1_DES3_KD: + var et Des3CbcSha1Kd + return et, nil + case chksumtype.KERB_CHECKSUM_HMAC_MD5: + var et RC4HMAC + return et, nil + //case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: + // var et RC4HMAC + // return et, nil + default: + return nil, fmt.Errorf("unknown or unsupported checksum type: %d", id) + } +} + +// GetKeyFromPassword generates an encryption key from the principal's password. 
+func GetKeyFromPassword(passwd string, cname types.PrincipalName, realm string, etypeID int32, pas types.PADataSequence) (types.EncryptionKey, etype.EType, error) { + var key types.EncryptionKey + et, err := GetEtype(etypeID) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + sk2p := et.GetDefaultStringToKeyParams() + var salt string + var paID int32 + for _, pa := range pas { + switch pa.PADataType { + case patype.PA_PW_SALT: + if paID > pa.PADataType { + continue + } + salt = string(pa.PADataValue) + case patype.PA_ETYPE_INFO: + if paID > pa.PADataType { + continue + } + var eti types.ETypeInfo + err := eti.Unmarshal(pa.PADataValue) + if err != nil { + return key, et, fmt.Errorf("error unmashaling PA Data to PA-ETYPE-INFO2: %v", err) + } + if etypeID != eti[0].EType { + et, err = GetEtype(eti[0].EType) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + } + salt = string(eti[0].Salt) + case patype.PA_ETYPE_INFO2: + if paID > pa.PADataType { + continue + } + var et2 types.ETypeInfo2 + err := et2.Unmarshal(pa.PADataValue) + if err != nil { + return key, et, fmt.Errorf("error unmashalling PA Data to PA-ETYPE-INFO2: %v", err) + } + if etypeID != et2[0].EType { + et, err = GetEtype(et2[0].EType) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + } + if len(et2[0].S2KParams) == 4 { + sk2p = hex.EncodeToString(et2[0].S2KParams) + } + salt = et2[0].Salt + } + } + if salt == "" { + salt = cname.GetSalt(realm) + } + k, err := et.StringToKey(passwd, salt, sk2p) + if err != nil { + return key, et, fmt.Errorf("error deriving key from string: %+v", err) + } + key = types.EncryptionKey{ + KeyType: etypeID, + KeyValue: k, + } + return key, et, nil +} + +// GetEncryptedData encrypts the data provided and returns and EncryptedData type. +// Pass a usage value of zero to use the key provided directly rather than deriving one. 
+func GetEncryptedData(plainBytes []byte, key types.EncryptionKey, usage uint32, kvno int) (types.EncryptedData, error) { + var ed types.EncryptedData + et, err := GetEtype(key.KeyType) + if err != nil { + return ed, fmt.Errorf("error getting etype: %v", err) + } + _, b, err := et.EncryptMessage(key.KeyValue, plainBytes, usage) + if err != nil { + return ed, err + } + + ed = types.EncryptedData{ + EType: key.KeyType, + Cipher: b, + KVNO: kvno, + } + return ed, nil +} + +// DecryptEncPart decrypts the EncryptedData. +func DecryptEncPart(ed types.EncryptedData, key types.EncryptionKey, usage uint32) ([]byte, error) { + return DecryptMessage(ed.Cipher, key, usage) +} + +// DecryptMessage decrypts the ciphertext and verifies the integrity. +func DecryptMessage(ciphertext []byte, key types.EncryptionKey, usage uint32) ([]byte, error) { + et, err := GetEtype(key.KeyType) + if err != nil { + return []byte{}, fmt.Errorf("error decrypting: %v", err) + } + b, err := et.DecryptMessage(key.KeyValue, ciphertext, usage) + if err != nil { + return nil, fmt.Errorf("error decrypting: %v", err) + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go new file mode 100644 index 00000000..6e650eb6 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go @@ -0,0 +1,139 @@ +package crypto + +import ( + "crypto/des" + "crypto/hmac" + "crypto/sha1" + "errors" + "hash" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +//RFC: 3961 Section 6.3 + +// Des3CbcSha1Kd implements Kerberos encryption type des3-cbc-hmac-sha1-kd +type Des3CbcSha1Kd struct { +} + +// GetETypeID returns the EType ID number. 
+func (e Des3CbcSha1Kd) GetETypeID() int32 { + return etypeID.DES3_CBC_SHA1_KD +} + +// GetHashID returns the checksum type ID number. +func (e Des3CbcSha1Kd) GetHashID() int32 { + return chksumtype.HMAC_SHA1_DES3_KD +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Des3CbcSha1Kd) GetKeyByteSize() int { + return 24 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Des3CbcSha1Kd) GetKeySeedBitLength() int { + return 21 * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Des3CbcSha1Kd) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Des3CbcSha1Kd) GetMessageBlockByteSize() int { + //For traditional CBC mode with padding, it would be the underlying cipher's block size + return des.BlockSize +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Des3CbcSha1Kd) GetDefaultStringToKeyParams() string { + var s string + return s +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Des3CbcSha1Kd) GetConfounderByteSize() int { + return des.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Des3CbcSha1Kd) GetHMACBitLength() int { + return e.GetHashFunc()().Size() * 8 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Des3CbcSha1Kd) GetCypherBlockBitLength() int { + return des.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Des3CbcSha1Kd) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + if s2kparams != "" { + return []byte{}, errors.New("s2kparams must be an empty string") + } + return rfc3961.DES3StringToKey(secret, salt, e) +} + +// RandomToKey returns a key from the bytes provided. 
+func (e Des3CbcSha1Kd) RandomToKey(b []byte) []byte { + return rfc3961.DES3RandomToKey(b) +} + +// DeriveRandom generates data needed for key generation. +func (e Des3CbcSha1Kd) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + r, err := rfc3961.DeriveRandom(protocolKey, usage, e) + return r, err +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Des3CbcSha1Kd) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + r, err := e.DeriveRandom(protocolKey, usage) + if err != nil { + return nil, err + } + return e.RandomToKey(r), nil +} + +// EncryptData encrypts the data provided. +func (e Des3CbcSha1Kd) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3961.DES3EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Des3CbcSha1Kd) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3961.DES3EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) { + return rfc3961.DES3DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Des3CbcSha1Kd) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3961.DES3DecryptMessage(key, ciphertext, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. 
+func (e Des3CbcSha1Kd) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Des3CbcSha1Kd) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go new file mode 100644 index 00000000..ab1496d3 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go @@ -0,0 +1,29 @@ +// Package etype provides the Kerberos Encryption Type interface +package etype + +import "hash" + +// EType is the interface defining the Encryption Type. +type EType interface { + GetETypeID() int32 + GetHashID() int32 + GetKeyByteSize() int + GetKeySeedBitLength() int + GetDefaultStringToKeyParams() string + StringToKey(string, salt, s2kparams string) ([]byte, error) + RandomToKey(b []byte) []byte + GetHMACBitLength() int + GetMessageBlockByteSize() int + EncryptData(key, data []byte) ([]byte, []byte, error) + EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) + DecryptData(key, data []byte) ([]byte, error) + DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) + GetCypherBlockBitLength() int + GetConfounderByteSize() int + DeriveKey(protocolKey, usage []byte) ([]byte, error) + DeriveRandom(protocolKey, usage []byte) ([]byte, error) + VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool + GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) + VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool + GetHashFunc() func() hash.Hash +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go 
b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go new file mode 100644 index 00000000..42f84b85 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go @@ -0,0 +1,133 @@ +package crypto + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "hash" + "io" + + "github.com/jcmturner/gokrb5/v8/crypto/rfc3961" + "github.com/jcmturner/gokrb5/v8/crypto/rfc4757" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" + "golang.org/x/crypto/md4" +) + +// RC4HMAC implements Kerberos encryption type rc4-hmac +type RC4HMAC struct { +} + +// GetETypeID returns the EType ID number. +func (e RC4HMAC) GetETypeID() int32 { + return etypeID.RC4_HMAC +} + +// GetHashID returns the checksum type ID number. +func (e RC4HMAC) GetHashID() int32 { + return chksumtype.KERB_CHECKSUM_HMAC_MD5 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e RC4HMAC) GetKeyByteSize() int { + return 16 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e RC4HMAC) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e RC4HMAC) GetHashFunc() func() hash.Hash { + return md5.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e RC4HMAC) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e RC4HMAC) GetDefaultStringToKeyParams() string { + return "" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e RC4HMAC) GetConfounderByteSize() int { + return 8 +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e RC4HMAC) GetHMACBitLength() int { + return md5.Size * 8 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. 
+func (e RC4HMAC) GetCypherBlockBitLength() int { + return 8 // doesn't really apply +} + +// StringToKey returns a key derived from the string provided. +func (e RC4HMAC) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc4757.StringToKey(secret) +} + +// RandomToKey returns a key from the bytes provided. +func (e RC4HMAC) RandomToKey(b []byte) []byte { + r := bytes.NewReader(b) + h := md4.New() + io.Copy(h, r) + return h.Sum(nil) +} + +// EncryptData encrypts the data provided. +func (e RC4HMAC) EncryptData(key, data []byte) ([]byte, []byte, error) { + b, err := rfc4757.EncryptData(key, data, e) + return []byte{}, b, err +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e RC4HMAC) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + b, err := rfc4757.EncryptMessage(key, message, usage, false, e) + return []byte{}, b, err +} + +// DecryptData decrypts the data provided. +func (e RC4HMAC) DecryptData(key, data []byte) ([]byte, error) { + return rfc4757.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e RC4HMAC) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc4757.DecryptMessage(key, ciphertext, usage, false, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e RC4HMAC) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc4757.HMAC(protocolKey, usage), nil +} + +// DeriveRandom generates data needed for key generation. +func (e RC4HMAC) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. 
+func (e RC4HMAC) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc4757.VerifyIntegrity(protocolKey, pt, ct, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e RC4HMAC) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return rfc4757.Checksum(protocolKey, usage, data) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e RC4HMAC) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + checksum, err := rfc4757.Checksum(protocolKey, usage, data) + if err != nil { + return false + } + return hmac.Equal(checksum, chksum) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go new file mode 100644 index 00000000..1383258c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go @@ -0,0 +1,119 @@ +// Package rfc3961 provides encryption and checksum methods as specified in RFC 3961 +package rfc3961 + +import ( + "crypto/cipher" + "crypto/des" + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// DES3EncryptData encrypts the data provided using DES3 and methods specific to the etype provided. 
+func DES3EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return nil, nil, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + data, _ = common.ZeroPad(data, e.GetMessageBlockByteSize()) + + block, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, nil, fmt.Errorf("error creating cipher: %v", err) + } + + //RFC 3961: initial cipher state All bits zero + ivz := make([]byte, des.BlockSize) + + ct := make([]byte, len(data)) + mode := cipher.NewCBCEncrypter(block, ivz) + mode.CryptBlocks(ct, data) + return ct[len(ct)-e.GetMessageBlockByteSize():], ct, nil +} + +// DES3EncryptMessage encrypts the message provided using DES3 and methods specific to the etype provided. +// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func DES3EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + plainBytes, _ = common.ZeroPad(plainBytes, e.GetMessageBlockByteSize()) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + // Generate and append integrity hash + ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) 
+ return iv, b, nil +} + +// DES3DecryptData decrypts the data provided using DES3 and methods specific to the etype provided. +func DES3DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + + if len(data) < des.BlockSize || len(data)%des.BlockSize != 0 { + return []byte{}, errors.New("ciphertext is not a multiple of the block size") + } + block, err := des.NewTripleDESCipher(key) + if err != nil { + return []byte{}, fmt.Errorf("error creating cipher: %v", err) + } + pt := make([]byte, len(data)) + ivz := make([]byte, des.BlockSize) + mode := cipher.NewCBCDecrypter(block, ivz) + mode.CryptBlocks(pt, data) + return pt, nil +} + +// DES3DecryptMessage decrypts the message provided using DES3 and methods specific to the etype provided. +// The integrity of the message is also verified. +func DES3DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, fmt.Errorf("error decrypting: %v", err) + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("error decrypting: integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} + +// VerifyIntegrity verifies the integrity of cipertext bytes ct. 
+func VerifyIntegrity(key, ct, pt []byte, usage uint32, etype etype.EType) bool { + h := make([]byte, etype.GetHMACBitLength()/8) + copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:]) + expectedMAC, _ := common.GetIntegrityHash(pt, key, usage, etype) + return hmac.Equal(h, expectedMAC) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go new file mode 100644 index 00000000..ed9b169c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go @@ -0,0 +1,169 @@ +package rfc3961 + +import ( + "bytes" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +const ( + prfconstant = "prf" +) + +// DeriveRandom implements the RFC 3961 defined function: DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)). +// +// key: base key or protocol key. Likely to be a key from a keytab file. +// +// usage: a constant. +// +// n: block size in bits (not bytes) - note if you use something like aes.BlockSize this is in bytes. +// +// k: key length / key seed length in bits. Eg. for AES256 this value is 256. +// +// e: the encryption etype function to use. +func DeriveRandom(key, usage []byte, e etype.EType) ([]byte, error) { + n := e.GetCypherBlockBitLength() + k := e.GetKeySeedBitLength() + //Ensure the usage constant is at least the size of the cypher block size. Pass it through the nfold algorithm that will "stretch" it if needs be. + nFoldUsage := Nfold(usage, n) + //k-truncate implemented by creating a byte array the size of k (k is in bits hence /8) + out := make([]byte, k/8) + // Keep feeding the output back into the encryption function until it is no longer short than k. 
+ _, K, err := e.EncryptData(key, nFoldUsage) + if err != nil { + return out, err + } + for i := copy(out, K); i < len(out); { + _, K, _ = e.EncryptData(key, K) + i = i + copy(out[i:], K) + } + return out, nil +} + +// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. +func DeriveKey(protocolKey, usage []byte, e etype.EType) ([]byte, error) { + r, err := e.DeriveRandom(protocolKey, usage) + if err != nil { + return nil, err + } + return e.RandomToKey(r), nil +} + +// RandomToKey returns a key from the bytes provided according to the definition in RFC 3961. +func RandomToKey(b []byte) []byte { + return b +} + +// DES3RandomToKey returns a key from the bytes provided according to the definition in RFC 3961 for DES3 etypes. +func DES3RandomToKey(b []byte) []byte { + r := fixWeakKey(stretch56Bits(b[:7])) + r2 := fixWeakKey(stretch56Bits(b[7:14])) + r = append(r, r2...) + r3 := fixWeakKey(stretch56Bits(b[14:21])) + r = append(r, r3...) + return r +} + +// DES3StringToKey returns a key derived from the string provided according to the definition in RFC 3961 for DES3 etypes. 
+func DES3StringToKey(secret, salt string, e etype.EType) ([]byte, error) { + s := secret + salt + tkey := e.RandomToKey(Nfold([]byte(s), e.GetKeySeedBitLength())) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// PseudoRandom function as defined in RFC 3961 +func PseudoRandom(key, b []byte, e etype.EType) ([]byte, error) { + h := e.GetHashFunc()() + h.Write(b) + tmp := h.Sum(nil)[:e.GetMessageBlockByteSize()] + k, err := e.DeriveKey(key, []byte(prfconstant)) + if err != nil { + return []byte{}, err + } + _, prf, err := e.EncryptData(k, tmp) + if err != nil { + return []byte{}, err + } + return prf, nil +} + +func stretch56Bits(b []byte) []byte { + d := make([]byte, len(b), len(b)) + copy(d, b) + var lb byte + for i, v := range d { + bv, nb := calcEvenParity(v) + d[i] = nb + if bv != 0 { + lb = lb | (1 << uint(i+1)) + } else { + lb = lb &^ (1 << uint(i+1)) + } + } + _, lb = calcEvenParity(lb) + d = append(d, lb) + return d +} + +func calcEvenParity(b byte) (uint8, uint8) { + lowestbit := b & 0x01 + // c counter of 1s in the first 7 bits of the byte + var c int + // Iterate over the highest 7 bits (hence p starts at 1 not zero) and count the 1s. 
+ for p := 1; p < 8; p++ { + v := b & (1 << uint(p)) + if v != 0 { + c++ + } + } + if c%2 == 0 { + //Even number of 1s so set parity to 1 + b = b | 1 + } else { + //Odd number of 1s so set parity to 0 + b = b &^ 1 + } + return lowestbit, b +} + +func fixWeakKey(b []byte) []byte { + if weak(b) { + b[7] ^= 0xF0 + } + return b +} + +func weak(b []byte) bool { + // weak keys from https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-67r1.pdf + weakKeys := [4][]byte{ + {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + {0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE}, + {0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1}, + {0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E}, + } + semiWeakKeys := [12][]byte{ + {0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E}, + {0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E, 0x01}, + {0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1}, + {0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01}, + {0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE}, + {0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01}, + {0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1}, + {0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E}, + {0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE}, + {0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E}, + {0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE}, + {0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1}, + } + for _, k := range weakKeys { + if bytes.Equal(b, k) { + return true + } + } + for _, k := range semiWeakKeys { + if bytes.Equal(b, k) { + return true + } + } + return false +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go new file mode 100644 index 00000000..9536b1e3 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go @@ -0,0 +1,107 @@ +package rfc3961 + +// Implementation of the n-fold algorithm as defined in RFC 3961. 
+ +/* Credits +This golang implementation of nfold used the following project for help with implementation detail. +Although their source is in java it was helpful as a reference implementation of the RFC. +You can find the source code of their open source project along with license information below. +We acknowledge and are grateful to these developers for their contributions to open source + +Project: Apache Directory (http://http://directory.apache.org/) +https://svn.apache.org/repos/asf/directory/apacheds/tags/1.5.1/kerberos-shared/src/main/java/org/apache/directory/server/kerberos/shared/crypto/encryption/NFold.java +License: http://www.apache.org/licenses/LICENSE-2.0 +*/ + +// Nfold expands the key to ensure it is not smaller than one cipher block. +// Defined in RFC 3961. +// +// m input bytes that will be "stretched" to the least common multiple of n bits and the bit length of m. +func Nfold(m []byte, n int) []byte { + k := len(m) * 8 + + //Get the lowest common multiple of the two bit sizes + lcm := lcm(n, k) + relicate := lcm / k + var sumBytes []byte + + for i := 0; i < relicate; i++ { + rotation := 13 * i + sumBytes = append(sumBytes, rotateRight(m, rotation)...) 
+ } + + nfold := make([]byte, n/8) + sum := make([]byte, n/8) + for i := 0; i < lcm/n; i++ { + for j := 0; j < n/8; j++ { + sum[j] = sumBytes[j+(i*len(sum))] + } + nfold = onesComplementAddition(nfold, sum) + } + return nfold +} + +func onesComplementAddition(n1, n2 []byte) []byte { + numBits := len(n1) * 8 + out := make([]byte, numBits/8) + carry := 0 + for i := numBits - 1; i > -1; i-- { + n1b := getBit(&n1, i) + n2b := getBit(&n2, i) + s := n1b + n2b + carry + + if s == 0 || s == 1 { + setBit(&out, i, s) + carry = 0 + } else if s == 2 { + carry = 1 + } else if s == 3 { + setBit(&out, i, 1) + carry = 1 + } + } + if carry == 1 { + carryArray := make([]byte, len(n1)) + carryArray[len(carryArray)-1] = 1 + out = onesComplementAddition(out, carryArray) + } + return out +} + +func rotateRight(b []byte, step int) []byte { + out := make([]byte, len(b)) + bitLen := len(b) * 8 + for i := 0; i < bitLen; i++ { + v := getBit(&b, i) + setBit(&out, (i+step)%bitLen, v) + } + return out +} + +func lcm(x, y int) int { + return (x * y) / gcd(x, y) +} + +func gcd(x, y int) int { + for y != 0 { + x, y = y, x%y + } + return x +} + +func getBit(b *[]byte, p int) int { + pByte := p / 8 + pBit := uint(p % 8) + vByte := (*b)[pByte] + vInt := int(vByte >> (8 - (pBit + 1)) & 0x0001) + return vInt +} + +func setBit(b *[]byte, p, v int) { + pByte := p / 8 + pBit := uint(p % 8) + oldByte := (*b)[pByte] + var newByte byte + newByte = byte(v<<(8-(pBit+1))) | oldByte + (*b)[pByte] = newByte +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go new file mode 100644 index 00000000..5ff89e85 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go @@ -0,0 +1,89 @@ +// Package rfc3962 provides encryption and checksum methods as specified in RFC 3962 +package rfc3962 + +import ( + "crypto/rand" + "errors" + "fmt" + + "github.com/jcmturner/aescts/v2" + 
"github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 3962. +func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, e.GetCypherBlockBitLength()/8) + return aescts.Encrypt(key, ivz, data) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. +// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + // Encrypt the data + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + // Generate and append integrity hash + ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) 
+ return iv, b, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 3962. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, e.GetCypherBlockBitLength()/8) + return aescts.Decrypt(key, ivz, data) +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. +// The integrity of the message is also verified. +func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, err + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go new file mode 100644 index 00000000..fb402d97 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go @@ -0,0 +1,51 @@ +package rfc3962 + +import ( + "encoding/binary" + "encoding/hex" + "errors" + + "github.com/jcmturner/gofork/x/crypto/pbkdf2" + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +const ( + s2kParamsZero = 4294967296 +) + +// StringToKey returns a key derived from the string provided according to the definition in RFC 3961. 
+func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { + i, err := S2KparamsToItertions(s2kparams) + if err != nil { + return nil, err + } + return StringToKeyIter(secret, salt, i, e) +} + +// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 +func StringToPBKDF2(secret, salt string, iterations int64, e etype.EType) []byte { + return pbkdf2.Key64([]byte(secret), []byte(salt), iterations, int64(e.GetKeyByteSize()), e.GetHashFunc()) +} + +// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 3961. +func StringToKeyIter(secret, salt string, iterations int64, e etype.EType) ([]byte, error) { + tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// S2KparamsToItertions converts the string representation of iterations to an integer +func S2KparamsToItertions(s2kparams string) (int64, error) { + //The s2kparams string should be hex string representing 4 bytes + //The 4 bytes represent a number in big endian order + //If the value is zero then the number of iterations should be 4,294,967,296 (2^32) + var i uint32 + if len(s2kparams) != 8 { + return int64(s2kParamsZero), errors.New("invalid s2kparams length") + } + b, err := hex.DecodeString(s2kparams) + if err != nil { + return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot decode string to bytes") + } + i = binary.BigEndian.Uint32(b) + return int64(i), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go new file mode 100644 index 00000000..45276e95 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go @@ -0,0 +1,40 @@ +package rfc4757 + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "io" +) + +// Checksum returns a hash of the data in accordance with RFC 4757 
+func Checksum(key []byte, usage uint32, data []byte) ([]byte, error) { + // Create hashing key + s := append([]byte(`signaturekey`), byte(0x00)) //includes zero octet at end + mac := hmac.New(md5.New, key) + mac.Write(s) + Ksign := mac.Sum(nil) + + // Format data + tb := UsageToMSMsgType(usage) + p := append(tb, data...) + h := md5.New() + rb := bytes.NewReader(p) + _, err := io.Copy(h, rb) + if err != nil { + return []byte{}, err + } + tmp := h.Sum(nil) + + // Generate HMAC + mac = hmac.New(md5.New, Ksign) + mac.Write(tmp) + return mac.Sum(nil), nil +} + +// HMAC returns a keyed MD5 checksum of the data +func HMAC(key []byte, data []byte) []byte { + mac := hmac.New(md5.New, key) + mac.Write(data) + return mac.Sum(nil) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go new file mode 100644 index 00000000..fdebe736 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go @@ -0,0 +1,80 @@ +// Package rfc4757 provides encryption and checksum methods as specified in RFC 4757 +package rfc4757 + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/rc4" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 4757. +func EncryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + rc4Cipher, err := rc4.NewCipher(key) + if err != nil { + return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err) + } + ed := make([]byte, len(data)) + copy(ed, data) + rc4Cipher.XORKeyStream(ed, ed) + rc4Cipher.Reset() + return ed, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 4757. 
+func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + return EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. +// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message. +func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { + confounder := make([]byte, e.GetConfounderByteSize()) // size = 8 + _, err := rand.Read(confounder) + if err != nil { + return []byte{}, fmt.Errorf("error generating confounder: %v", err) + } + k1 := key + k2 := HMAC(k1, UsageToMSMsgType(usage)) + toenc := append(confounder, data...) + chksum := HMAC(k2, toenc) + k3 := HMAC(k2, chksum) + + ed, err := EncryptData(k3, toenc, e) + if err != nil { + return []byte{}, fmt.Errorf("error encrypting data: %v", err) + } + + msg := append(chksum, ed...) + return msg, nil +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. +// The integrity of the message is also verified. +func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { + checksum := data[:e.GetHMACBitLength()/8] + ct := data[e.GetHMACBitLength()/8:] + _, k2, k3 := deriveKeys(key, checksum, usage, export) + + pt, err := DecryptData(k3, ct, e) + if err != nil { + return []byte{}, fmt.Errorf("error decrypting data: %v", err) + } + + if !VerifyIntegrity(k2, pt, data, e) { + return []byte{}, errors.New("integrity checksum incorrect") + } + return pt[e.GetConfounderByteSize():], nil +} + +// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data. 
+func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool { + chksum := HMAC(key, pt) + return hmac.Equal(chksum, data[:e.GetHMACBitLength()/8]) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go new file mode 100644 index 00000000..d1f90c07 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go @@ -0,0 +1,40 @@ +package rfc4757 + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/md4" +) + +// StringToKey returns a key derived from the string provided according to the definition in RFC 4757. +func StringToKey(secret string) ([]byte, error) { + b := make([]byte, len(secret)*2, len(secret)*2) + for i, r := range secret { + u := fmt.Sprintf("%04x", r) + c, err := hex.DecodeString(u) + if err != nil { + return []byte{}, errors.New("character could not be encoded") + } + // Swap round the two bytes to make little endian as we put into byte slice + b[2*i] = c[1] + b[2*i+1] = c[0] + } + r := bytes.NewReader(b) + h := md4.New() + _, err := io.Copy(h, r) + if err != nil { + return []byte{}, err + } + return h.Sum(nil), nil +} + +func deriveKeys(key, checksum []byte, usage uint32, export bool) (k1, k2, k3 []byte) { + k1 = key + k2 = HMAC(k1, UsageToMSMsgType(usage)) + k3 = HMAC(k2, checksum) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go new file mode 100644 index 00000000..068588d3 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go @@ -0,0 +1,20 @@ +package rfc4757 + +import "encoding/binary" + +// UsageToMSMsgType converts Kerberos key usage numbers to Microsoft message type encoded as a little-endian four byte slice. 
+func UsageToMSMsgType(usage uint32) []byte { + // Translate usage numbers to the Microsoft T numbers + switch usage { + case 3: + usage = 8 + case 9: + usage = 8 + case 23: + usage = 13 + } + // Now convert to bytes + tb := make([]byte, 4) // We force an int32 input so we can't go over 4 bytes + binary.PutUvarint(tb, uint64(usage)) + return tb +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go new file mode 100644 index 00000000..54cff7b4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go @@ -0,0 +1,125 @@ +// Package rfc8009 provides encryption and checksum methods as specified in RFC 8009 +package rfc8009 + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + + "github.com/jcmturner/aescts/v2" + "github.com/jcmturner/gokrb5/v8/crypto/common" + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 8009. +func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, aes.BlockSize) + return aescts.Encrypt(key, ivz, data) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. +// The encrypted data is concatenated with its integrity hash to create an encrypted message. 
+func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key)) + } + if len(key) != e.GetKeyByteSize() { + } + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + // Encrypt the data + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + ivz := make([]byte, e.GetConfounderByteSize()) + ih, err := GetIntegityHash(ivz, b, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 8009. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key)) + } + ivz := make([]byte, aes.BlockSize) + return aescts.Decrypt(key, ivz, data) +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. +// The integrity of the message is also verified. 
+func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
+	//Derive the key
+	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
+	if err != nil {
+		return nil, fmt.Errorf("error deriving key: %v", err)
+	}
+	// Strip off the checksum from the end
+	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
+	if err != nil {
+		return nil, err
+	}
+	//Verify checksum
+	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
+		return nil, errors.New("integrity verification failed")
+	}
+	//Remove the confounder bytes
+	return b[e.GetConfounderByteSize():], nil
+}
+
+// GetIntegityHash returns a keyed integrity hash of the bytes provided as defined in RFC 8009
+func GetIntegityHash(iv, c, key []byte, usage uint32, e etype.EType) ([]byte, error) {
+	// Generate and append integrity hash
+	// Rather than calculating the hash over the confounder and plaintext
+	// it is calculated over the iv concatenated with the AES cipher output.
+	ib := append(iv, c...)
+	return common.GetIntegrityHash(ib, key, usage, e)
+}
+
+// VerifyIntegrity verifies the integrity of ciphertext bytes ct.
+func VerifyIntegrity(key, ct []byte, usage uint32, etype etype.EType) bool {
+	h := make([]byte, etype.GetHMACBitLength()/8)
+	copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:])
+	ivz := make([]byte, etype.GetConfounderByteSize())
+	ib := append(ivz, ct[:len(ct)-(etype.GetHMACBitLength()/8)]...)
+ expectedMAC, _ := common.GetIntegrityHash(ib, key, usage, etype) + return hmac.Equal(h, expectedMAC) +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go new file mode 100644 index 00000000..e9473222 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go @@ -0,0 +1,135 @@ +package rfc8009 + +import ( + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + + "github.com/jcmturner/gokrb5/v8/crypto/etype" + "github.com/jcmturner/gokrb5/v8/iana/etypeID" + "golang.org/x/crypto/pbkdf2" +) + +const ( + s2kParamsZero = 32768 +) + +// DeriveRandom for key derivation as defined in RFC 8009 +func DeriveRandom(protocolKey, usage []byte, e etype.EType) ([]byte, error) { + h := e.GetHashFunc()() + return KDF_HMAC_SHA2(protocolKey, []byte("prf"), usage, h.Size(), e), nil +} + +// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. 
+//
+// https://tools.ietf.org/html/rfc8009#section-5
+func DeriveKey(protocolKey, label []byte, e etype.EType) []byte {
+	var context []byte
+	var kl int
+	// Key length is longer for aes256-cts-hmac-sha384-192 as it is a Ke or from StringToKey (where label is "kerberos")
+	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
+	Swtch:
+		switch label[len(label)-1] {
+		case 0x73:
+			// 0x73 is "s" so label could be kerberos meaning StringToKey so now check if the label is "kerberos"
+			kerblabel := []byte("kerberos")
+			if len(label) != len(kerblabel) {
+				break
+			}
+			for i, b := range label {
+				if b != kerblabel[i] {
+					kl = e.GetKeySeedBitLength()
+					break Swtch
+				}
+			}
+			if kl == 0 {
+				// This is StringToKey
+				kl = 256
+			}
+		case 0xAA:
+			// This is a Ke
+			kl = 256
+		}
+	}
+	if kl == 0 {
+		kl = e.GetKeySeedBitLength()
+	}
+	return e.RandomToKey(KDF_HMAC_SHA2(protocolKey, label, context, kl, e))
+}
+
+// RandomToKey returns a key from the bytes provided according to the definition in RFC 8009.
+func RandomToKey(b []byte) []byte {
+	return b
+}
+
+// StringToKey returns a key derived from the string provided according to the definition in RFC 8009.
+func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) {
+	i, err := S2KparamsToItertions(s2kparams)
+	if err != nil {
+		return nil, err
+	}
+	return StringToKeyIter(secret, salt, i, e)
+}
+
+// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 8009.
+func StringToKeyIter(secret, salt string, iterations int, e etype.EType) ([]byte, error) { + tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 +func StringToPBKDF2(secret, salt string, iterations int, e etype.EType) []byte { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + return pbkdf2.Key([]byte(secret), []byte(salt), iterations, kl, e.GetHashFunc()) +} + +// KDF_HMAC_SHA2 key derivation: https://tools.ietf.org/html/rfc8009#section-3 +func KDF_HMAC_SHA2(protocolKey, label, context []byte, kl int, e etype.EType) []byte { + //k: Length in bits of the key to be outputted, expressed in big-endian binary representation in 4 bytes. + k := make([]byte, 4, 4) + binary.BigEndian.PutUint32(k, uint32(kl)) + + c := make([]byte, 4, 4) + binary.BigEndian.PutUint32(c, uint32(1)) + c = append(c, label...) + c = append(c, byte(0)) + if len(context) > 0 { + c = append(c, context...) + } + c = append(c, k...) + + mac := hmac.New(e.GetHashFunc(), protocolKey) + mac.Write(c) + return mac.Sum(nil)[:(kl / 8)] +} + +// GetSaltP returns the salt value based on the etype name: https://tools.ietf.org/html/rfc8009#section-4 +func GetSaltP(salt, ename string) string { + b := []byte(ename) + b = append(b, byte(0)) + b = append(b, []byte(salt)...) + return string(b) +} + +// S2KparamsToItertions converts the string representation of iterations to an integer for RFC 8009. 
+func S2KparamsToItertions(s2kparams string) (int, error) { + var i uint32 + if len(s2kparams) != 8 { + return s2kParamsZero, errors.New("Invalid s2kparams length") + } + b, err := hex.DecodeString(s2kparams) + if err != nil { + return s2kParamsZero, errors.New("Invalid s2kparams, cannot decode string to bytes") + } + i = binary.BigEndian.Uint32(b) + //buf := bytes.NewBuffer(b) + //err = binary.Read(buf, binary.BigEndian, &i) + if err != nil { + return s2kParamsZero, errors.New("Invalid s2kparams, cannot convert to big endian int32") + } + return int(i), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go new file mode 100644 index 00000000..ab8daa28 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go @@ -0,0 +1,174 @@ +package gssapi + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" +) + +// RFC 4121, section 4.2.6.1 + +const ( + // MICTokenFlagSentByAcceptor - this flag indicates the sender is the context acceptor. When not set, it indicates the sender is the context initiator + MICTokenFlagSentByAcceptor = 1 << iota + // MICTokenFlagSealed - this flag indicates confidentiality is provided for. It SHALL NOT be set in MIC tokens + MICTokenFlagSealed + // MICTokenFlagAcceptorSubkey - a subkey asserted by the context acceptor is used to protect the message + MICTokenFlagAcceptorSubkey +) + +const ( + micHdrLen = 16 // Length of the MIC Token's header +) + +// MICToken represents a GSS API MIC token, as defined in RFC 4121. 
+// It contains the header fields, the payload (this is not transmitted) and +// the checksum, and provides the logic for converting to/from bytes plus +// computing and verifying checksums +type MICToken struct { + // const GSS Token ID: 0x0404 + Flags byte // contains three flags: acceptor, sealed, acceptor subkey + // const Filler: 0xFF 0xFF 0xFF 0xFF 0xFF + SndSeqNum uint64 // sender's sequence number. big-endian + Payload []byte // your data! :) + Checksum []byte // checksum of { payload | header } +} + +// Return the 2 bytes identifying a GSS API MIC token +func getGSSMICTokenID() *[2]byte { + return &[2]byte{0x04, 0x04} +} + +// Return the filler bytes used in header +func fillerBytes() *[5]byte { + return &[5]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF} +} + +// Marshal the MICToken into a byte slice. +// The payload should have been set and the checksum computed, otherwise an error is returned. +func (mt *MICToken) Marshal() ([]byte, error) { + if mt.Checksum == nil { + return nil, errors.New("checksum has not been set") + } + + bytes := make([]byte, micHdrLen+len(mt.Checksum)) + copy(bytes[0:micHdrLen], mt.getMICChecksumHeader()[:]) + copy(bytes[micHdrLen:], mt.Checksum) + + return bytes, nil +} + +// SetChecksum uses the passed encryption key and key usage to compute the checksum over the payload and +// the header, and sets the Checksum field of this MICToken. +// If the payload has not been set or the checksum has already been set, an error is returned. +func (mt *MICToken) SetChecksum(key types.EncryptionKey, keyUsage uint32) error { + if mt.Checksum != nil { + return errors.New("checksum has already been computed") + } + checksum, err := mt.checksum(key, keyUsage) + if err != nil { + return err + } + mt.Checksum = checksum + return nil +} + +// Compute and return the checksum of this token, computed using the passed key and key usage. +// Note: This will NOT update the struct's Checksum field. 
+func (mt *MICToken) checksum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { + if mt.Payload == nil { + return nil, errors.New("cannot compute checksum with uninitialized payload") + } + d := make([]byte, micHdrLen+len(mt.Payload)) + copy(d[0:], mt.Payload) + copy(d[len(mt.Payload):], mt.getMICChecksumHeader()) + + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + return encType.GetChecksumHash(key.KeyValue, d, keyUsage) +} + +// Build a header suitable for a checksum computation +func (mt *MICToken) getMICChecksumHeader() []byte { + header := make([]byte, micHdrLen) + copy(header[0:2], getGSSMICTokenID()[:]) + header[2] = mt.Flags + copy(header[3:8], fillerBytes()[:]) + binary.BigEndian.PutUint64(header[8:16], mt.SndSeqNum) + return header +} + +// Verify computes the token's checksum with the provided key and usage, +// and compares it to the checksum present in the token. +// In case of any failure, (false, err) is returned, with err an explanatory error. +func (mt *MICToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) { + computed, err := mt.checksum(key, keyUsage) + if err != nil { + return false, err + } + if !hmac.Equal(computed, mt.Checksum) { + return false, fmt.Errorf( + "checksum mismatch. Computed: %s, Contained in token: %s", + hex.EncodeToString(computed), hex.EncodeToString(mt.Checksum)) + } + return true, nil +} + +// Unmarshal bytes into the corresponding MICToken. +// If expectFromAcceptor is true we expect the token to have been emitted by the gss acceptor, +// and will check the according flag, returning an error if the token does not match the expectation. 
+func (mt *MICToken) Unmarshal(b []byte, expectFromAcceptor bool) error { + if len(b) < micHdrLen { + return errors.New("bytes shorter than header length") + } + if !bytes.Equal(getGSSMICTokenID()[:], b[0:2]) { + return fmt.Errorf("wrong Token ID, Expected %s, was %s", + hex.EncodeToString(getGSSMICTokenID()[:]), + hex.EncodeToString(b[0:2])) + } + flags := b[2] + isFromAcceptor := flags&MICTokenFlagSentByAcceptor != 0 + if isFromAcceptor && !expectFromAcceptor { + return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") + } + if !isFromAcceptor && expectFromAcceptor { + return errors.New("unexpected acceptor flag is not set: expecting a token from the acceptor, not in the initiator") + } + if !bytes.Equal(b[3:8], fillerBytes()[:]) { + return fmt.Errorf("unexpected filler bytes: expecting %s, was %s", + hex.EncodeToString(fillerBytes()[:]), + hex.EncodeToString(b[3:8])) + } + + mt.Flags = flags + mt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) + mt.Checksum = b[micHdrLen:] + return nil +} + +// NewInitiatorMICToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. +// Other flags are set to 0. +// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. +// This is currently not supported. 
+func NewInitiatorMICToken(payload []byte, key types.EncryptionKey) (*MICToken, error) { + token := MICToken{ + Flags: 0x00, + SndSeqNum: 0, + Payload: payload, + } + + if err := token.SetChecksum(key, keyusage.GSSAPI_INITIATOR_SIGN); err != nil { + return nil, err + } + + return &token, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md new file mode 100644 index 00000000..8fdcf70c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md @@ -0,0 +1,20 @@ +# Notes on GSS-API Negotiation Mechanism +https://tools.ietf.org/html/rfc4178 + +Client sends an initial negotiation message to the server which specifies the list of mechanisms +the client can support in order of decreasing preference. +This message is generated with the ``NewNegTokenInitKrb5`` method. +The message generated by this function specifies only a kerberos v5 mechanism is supported. + +The RFC states that this message can optionally contain the initial mechanism token +for the preferred mechanism (KRB5 in this case) of the client. The ``NewNegTokenInitKrb5`` +includes this in the message. + +The server side responds to this message with a one of four messages: + +| Message Type/State | Description | +|--------------------|-------------| +| accept-completed | indicates that the initiator-selected mechanism was acceptable to the target, and that the security mechanism token embedded in the first negotiation message was sufficient to complete the authentication | +| accept-incomplete | At least one more message is needed from the client to establish security context. | +| reject | Negotiation is being terminated. 
| +| request-mic | (this state can only be present in the first reply message from the target) indicates that the MIC token exchange is REQUIRED if per-message integrity services are available | \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go new file mode 100644 index 00000000..8c91859b --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go @@ -0,0 +1,27 @@ +package gssapi + +import "github.com/jcmturner/gofork/encoding/asn1" + +// GSS-API context flags assigned numbers. +const ( + ContextFlagDeleg = 1 + ContextFlagMutual = 2 + ContextFlagReplay = 4 + ContextFlagSequence = 8 + ContextFlagConf = 16 + ContextFlagInteg = 32 + ContextFlagAnon = 64 +) + +// ContextFlags flags for GSSAPI +// DEPRECATED - do not use +type ContextFlags asn1.BitString + +// NewContextFlags creates a new ContextFlags instance +// DEPRECATED - do not use +func NewContextFlags() ContextFlags { + var c ContextFlags + c.BitLength = 32 + c.Bytes = make([]byte, 4) + return c +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go new file mode 100644 index 00000000..80822319 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go @@ -0,0 +1,202 @@ +// Package gssapi implements Generic Security Services Application Program Interface required for SPNEGO kerberos authentication. +package gssapi + +import ( + "context" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" +) + +// GSS-API OID names +const ( + // GSS-API OID names + OIDKRB5 OIDName = "KRB5" // MechType OID for Kerberos 5 + OIDMSLegacyKRB5 OIDName = "MSLegacyKRB5" // MechType OID for Kerberos 5 + OIDSPNEGO OIDName = "SPNEGO" + OIDGSSIAKerb OIDName = "GSSIAKerb" // Indicates the client cannot get a service ticket and asks the server to serve as an intermediate to the target KDC. 
http://k5wiki.kerberos.org/wiki/Projects/IAKERB#IAKERB_mech +) + +// GSS-API status values +const ( + StatusBadBindings = 1 << iota + StatusBadMech + StatusBadName + StatusBadNameType + StatusBadStatus + StatusBadSig + StatusBadMIC + StatusContextExpired + StatusCredentialsExpired + StatusDefectiveCredential + StatusDefectiveToken + StatusFailure + StatusNoContext + StatusNoCred + StatusBadQOP + StatusUnauthorized + StatusUnavailable + StatusDuplicateElement + StatusNameNotMN + StatusComplete + StatusContinueNeeded + StatusDuplicateToken + StatusOldToken + StatusUnseqToken + StatusGapToken +) + +// ContextToken is an interface for a GSS-API context token. +type ContextToken interface { + Marshal() ([]byte, error) + Unmarshal(b []byte) error + Verify() (bool, Status) + Context() context.Context +} + +/* +CREDENTIAL MANAGEMENT + +GSS_Acquire_cred acquire credentials for use +GSS_Release_cred release credentials after use +GSS_Inquire_cred display information about credentials +GSS_Add_cred construct credentials incrementally +GSS_Inquire_cred_by_mech display per-mechanism credential information + +CONTEXT-LEVEL CALLS + +GSS_Init_sec_context initiate outbound security context +GSS_Accept_sec_context accept inbound security context +GSS_Delete_sec_context flush context when no longer needed +GSS_Process_context_token process received control token on context +GSS_Context_time indicate validity time remaining on context +GSS_Inquire_context display information about context +GSS_Wrap_size_limit determine GSS_Wrap token size limit +GSS_Export_sec_context transfer context to other process +GSS_Import_sec_context import transferred context + +PER-MESSAGE CALLS + +GSS_GetMIC apply integrity check, receive as token separate from message +GSS_VerifyMIC validate integrity check token along with message +GSS_Wrap sign, optionally encrypt, encapsulate +GSS_Unwrap decapsulate, decrypt if needed, validate integrity check + +SUPPORT CALLS + +GSS_Display_status translate status 
codes to printable form +GSS_Indicate_mechs indicate mech_types supported on local system +GSS_Compare_name compare two names for equality +GSS_Display_name translate name to printable form +GSS_Import_name convert printable name to normalized form +GSS_Release_name free storage of normalized-form name +GSS_Release_buffer free storage of general GSS-allocated object +GSS_Release_OID_set free storage of OID set object +GSS_Create_empty_OID_set create empty OID set +GSS_Add_OID_set_member add member to OID set +GSS_Test_OID_set_member test if OID is member of OID set +GSS_Inquire_names_for_mech indicate name types supported by mechanism +GSS_Inquire_mechs_for_name indicates mechanisms supporting name type +GSS_Canonicalize_name translate name to per-mechanism form +GSS_Export_name externalize per-mechanism name +GSS_Duplicate_name duplicate name object +*/ + +// Mechanism is the GSS-API interface for authentication mechanisms. +type Mechanism interface { + OID() asn1.ObjectIdentifier + AcquireCred() error // acquire credentials for use (eg. AS exchange for KRB5) + InitSecContext() (ContextToken, error) // initiate outbound security context (eg TGS exchange builds AP_REQ to go into ContextToken to send to service) + AcceptSecContext(ct ContextToken) (bool, context.Context, Status) // service verifies the token server side to establish a context + MIC() MICToken // apply integrity check, receive as token separate from message + VerifyMIC(mt MICToken) (bool, error) // validate integrity check token along with message + Wrap(msg []byte) WrapToken // sign, optionally encrypt, encapsulate + Unwrap(wt WrapToken) []byte // decapsulate, decrypt if needed, validate integrity check +} + +// OIDName is the type for defined GSS-API OIDs. +type OIDName string + +// OID returns the OID for the provided OID name. 
+func (o OIDName) OID() asn1.ObjectIdentifier { + switch o { + case OIDSPNEGO: + return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2} + case OIDKRB5: + return asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} + case OIDMSLegacyKRB5: + return asn1.ObjectIdentifier{1, 2, 840, 48018, 1, 2, 2} + case OIDGSSIAKerb: + return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 2, 5} + } + return asn1.ObjectIdentifier{} +} + +// Status is the GSS-API status and implements the error interface. +type Status struct { + Code int + Message string +} + +// Error returns the Status description. +func (s Status) Error() string { + var str string + switch s.Code { + case StatusBadBindings: + str = "channel binding mismatch" + case StatusBadMech: + str = "unsupported mechanism requested" + case StatusBadName: + str = "invalid name provided" + case StatusBadNameType: + str = "name of unsupported type provided" + case StatusBadStatus: + str = "invalid input status selector" + case StatusBadSig: + str = "token had invalid integrity check" + case StatusBadMIC: + str = "preferred alias for GSS_S_BAD_SIG" + case StatusContextExpired: + str = "specified security context expired" + case StatusCredentialsExpired: + str = "expired credentials detected" + case StatusDefectiveCredential: + str = "defective credential detected" + case StatusDefectiveToken: + str = "defective token detected" + case StatusFailure: + str = "failure, unspecified at GSS-API level" + case StatusNoContext: + str = "no valid security context specified" + case StatusNoCred: + str = "no valid credentials provided" + case StatusBadQOP: + str = "unsupported QOP valu" + case StatusUnauthorized: + str = "operation unauthorized" + case StatusUnavailable: + str = "operation unavailable" + case StatusDuplicateElement: + str = "duplicate credential element requested" + case StatusNameNotMN: + str = "name contains multi-mechanism elements" + case StatusComplete: + str = "normal completion" + case StatusContinueNeeded: + str = "continuation call to 
routine required" + case StatusDuplicateToken: + str = "duplicate per-message token detected" + case StatusOldToken: + str = "timed-out per-message token detected" + case StatusUnseqToken: + str = "reordered (early) per-message token detected" + case StatusGapToken: + str = "skipped predecessor token(s) detected" + default: + str = "unknown GSS-API error status" + } + if s.Message != "" { + return fmt.Sprintf("%s: %s", str, s.Message) + } + return str +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go new file mode 100644 index 00000000..ea7d0543 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go @@ -0,0 +1,195 @@ +package gssapi + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" +) + +// RFC 4121, section 4.2.6.2 + +const ( + // HdrLen is the length of the Wrap Token's header + HdrLen = 16 + // FillerByte is a filler in the WrapToken structure + FillerByte byte = 0xFF +) + +// WrapToken represents a GSS API Wrap token, as defined in RFC 4121. +// It contains the header fields, the payload and the checksum, and provides +// the logic for converting to/from bytes plus computing and verifying checksums +type WrapToken struct { + // const GSS Token ID: 0x0504 + Flags byte // contains three flags: acceptor, sealed, acceptor subkey + // const Filler: 0xFF + EC uint16 // checksum length. big-endian + RRC uint16 // right rotation count. big-endian + SndSeqNum uint64 // sender's sequence number. big-endian + Payload []byte // your data! :) + CheckSum []byte // authenticated checksum of { payload | header } +} + +// Return the 2 bytes identifying a GSS API Wrap token +func getGssWrapTokenId() *[2]byte { + return &[2]byte{0x05, 0x04} +} + +// Marshal the WrapToken into a byte slice. 
+// The payload should have been set and the checksum computed, otherwise an error is returned. +func (wt *WrapToken) Marshal() ([]byte, error) { + if wt.CheckSum == nil { + return nil, errors.New("checksum has not been set") + } + if wt.Payload == nil { + return nil, errors.New("payload has not been set") + } + + pldOffset := HdrLen // Offset of the payload in the token + chkSOffset := HdrLen + len(wt.Payload) // Offset of the checksum in the token + + bytes := make([]byte, chkSOffset+int(wt.EC)) + copy(bytes[0:], getGssWrapTokenId()[:]) + bytes[2] = wt.Flags + bytes[3] = FillerByte + binary.BigEndian.PutUint16(bytes[4:6], wt.EC) + binary.BigEndian.PutUint16(bytes[6:8], wt.RRC) + binary.BigEndian.PutUint64(bytes[8:16], wt.SndSeqNum) + copy(bytes[pldOffset:], wt.Payload) + copy(bytes[chkSOffset:], wt.CheckSum) + return bytes, nil +} + +// SetCheckSum uses the passed encryption key and key usage to compute the checksum over the payload and +// the header, and sets the CheckSum field of this WrapToken. +// If the payload has not been set or the checksum has already been set, an error is returned. +func (wt *WrapToken) SetCheckSum(key types.EncryptionKey, keyUsage uint32) error { + if wt.Payload == nil { + return errors.New("payload has not been set") + } + if wt.CheckSum != nil { + return errors.New("checksum has already been computed") + } + chkSum, cErr := wt.computeCheckSum(key, keyUsage) + if cErr != nil { + return cErr + } + wt.CheckSum = chkSum + return nil +} + +// ComputeCheckSum computes and returns the checksum of this token, computed using the passed key and key usage. +// Note: This will NOT update the struct's Checksum field. 
+func (wt *WrapToken) computeCheckSum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { + if wt.Payload == nil { + return nil, errors.New("cannot compute checksum with uninitialized payload") + } + // Build a slice containing { payload | header } + checksumMe := make([]byte, HdrLen+len(wt.Payload)) + copy(checksumMe[0:], wt.Payload) + copy(checksumMe[len(wt.Payload):], getChecksumHeader(wt.Flags, wt.SndSeqNum)) + + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + return encType.GetChecksumHash(key.KeyValue, checksumMe, keyUsage) +} + +// Build a header suitable for a checksum computation +func getChecksumHeader(flags byte, senderSeqNum uint64) []byte { + header := make([]byte, 16) + copy(header[0:], []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00}) + binary.BigEndian.PutUint64(header[8:], senderSeqNum) + return header +} + +// Verify computes the token's checksum with the provided key and usage, +// and compares it to the checksum present in the token. +// In case of any failure, (false, Err) is returned, with Err an explanatory error. +func (wt *WrapToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) { + computed, cErr := wt.computeCheckSum(key, keyUsage) + if cErr != nil { + return false, cErr + } + if !hmac.Equal(computed, wt.CheckSum) { + return false, fmt.Errorf( + "checksum mismatch. Computed: %s, Contained in token: %s", + hex.EncodeToString(computed), hex.EncodeToString(wt.CheckSum)) + } + return true, nil +} + +// Unmarshal bytes into the corresponding WrapToken. +// If expectFromAcceptor is true, we expect the token to have been emitted by the gss acceptor, +// and will check the according flag, returning an error if the token does not match the expectation. +func (wt *WrapToken) Unmarshal(b []byte, expectFromAcceptor bool) error { + // Check if we can read a whole header + if len(b) < 16 { + return errors.New("bytes shorter than header length") + } + // Is the Token ID correct? 
+ if !bytes.Equal(getGssWrapTokenId()[:], b[0:2]) { + return fmt.Errorf("wrong Token ID. Expected %s, was %s", + hex.EncodeToString(getGssWrapTokenId()[:]), + hex.EncodeToString(b[0:2])) + } + // Check the acceptor flag + flags := b[2] + isFromAcceptor := flags&0x01 == 1 + if isFromAcceptor && !expectFromAcceptor { + return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") + } + if !isFromAcceptor && expectFromAcceptor { + return errors.New("expected acceptor flag is not set: expecting a token from the acceptor, not the initiator") + } + // Check the filler byte + if b[3] != FillerByte { + return fmt.Errorf("unexpected filler byte: expecting 0xFF, was %s ", hex.EncodeToString(b[3:4])) + } + checksumL := binary.BigEndian.Uint16(b[4:6]) + // Sanity check on the checksum length + if int(checksumL) > len(b)-HdrLen { + return fmt.Errorf("inconsistent checksum length: %d bytes to parse, checksum length is %d", len(b), checksumL) + } + + wt.Flags = flags + wt.EC = checksumL + wt.RRC = binary.BigEndian.Uint16(b[6:8]) + wt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) + wt.Payload = b[16 : len(b)-int(checksumL)] + wt.CheckSum = b[len(b)-int(checksumL):] + return nil +} + +// NewInitiatorWrapToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. +// Other flags are set to 0, and the RRC and sequence number are initialized to 0. +// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. +// This is currently not supported. +func NewInitiatorWrapToken(payload []byte, key types.EncryptionKey) (*WrapToken, error) { + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + + token := WrapToken{ + Flags: 0x00, // all zeroed out (this is a token sent by the initiator) + // Checksum size: length of output of the HMAC function, in bytes. 
+ EC: uint16(encType.GetHMACBitLength() / 8), + RRC: 0, + SndSeqNum: 0, + Payload: payload, + } + + if err := token.SetCheckSum(key, keyusage.GSSAPI_INITIATOR_SEAL); err != nil { + return nil, err + } + + return &token, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go new file mode 100644 index 00000000..457b89d7 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go @@ -0,0 +1,15 @@ +// Package addrtype provides Address type assigned numbers. +package addrtype + +// Address type IDs. +const ( + IPv4 int32 = 2 + Directional int32 = 3 + ChaosNet int32 = 5 + XNS int32 = 6 + ISO int32 = 7 + DECNETPhaseIV int32 = 12 + AppleTalkDDP int32 = 16 + NetBios int32 = 20 + IPv6 int32 = 24 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go new file mode 100644 index 00000000..e805b746 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go @@ -0,0 +1,23 @@ +// Package adtype provides Authenticator type assigned numbers. +package adtype + +// Authenticator type IDs. 
+const ( + ADIfRelevant int32 = 1 + ADIntendedForServer int32 = 2 + ADIntendedForApplicationClass int32 = 3 + ADKDCIssued int32 = 4 + ADAndOr int32 = 5 + ADMandatoryTicketExtensions int32 = 6 + ADInTicketExtensions int32 = 7 + ADMandatoryForKDC int32 = 8 + OSFDCE int32 = 64 + SESAME int32 = 65 + ADOSFDCEPKICertID int32 = 66 + ADAuthenticationStrength int32 = 70 + ADFXFastArmor int32 = 71 + ADFXFastUsed int32 = 72 + ADWin2KPAC int32 = 128 + ADEtypeNegotiation int32 = 129 + //Reserved values 9-63 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go new file mode 100644 index 00000000..d74cd60e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go @@ -0,0 +1,24 @@ +// Package asnAppTag provides ASN1 application tag numbers. +package asnAppTag + +// ASN1 application tag numbers. +const ( + Ticket = 1 + Authenticator = 2 + EncTicketPart = 3 + ASREQ = 10 + TGSREQ = 12 + ASREP = 11 + TGSREP = 13 + APREQ = 14 + APREP = 15 + KRBSafe = 20 + KRBPriv = 21 + KRBCred = 22 + EncASRepPart = 25 + EncTGSRepPart = 26 + EncAPRepPart = 27 + EncKrbPrivPart = 28 + EncKrbCredPart = 29 + KRBError = 30 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go new file mode 100644 index 00000000..93db952d --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go @@ -0,0 +1,32 @@ +// Package chksumtype provides Kerberos 5 checksum type assigned numbers. +package chksumtype + +// Checksum type IDs. 
+const ( + //RESERVED : 0 + CRC32 int32 = 1 + RSA_MD4 int32 = 2 + RSA_MD4_DES int32 = 3 + DES_MAC int32 = 4 + DES_MAC_K int32 = 5 + RSA_MD4_DES_K int32 = 6 + RSA_MD5 int32 = 7 + RSA_MD5_DES int32 = 8 + RSA_MD5_DES3 int32 = 9 + SHA1_ID10 int32 = 10 + //UNASSIGNED : 11 + HMAC_SHA1_DES3_KD int32 = 12 + HMAC_SHA1_DES3 int32 = 13 + SHA1_ID14 int32 = 14 + HMAC_SHA1_96_AES128 int32 = 15 + HMAC_SHA1_96_AES256 int32 = 16 + CMAC_CAMELLIA128 int32 = 17 + CMAC_CAMELLIA256 int32 = 18 + HMAC_SHA256_128_AES128 int32 = 19 + HMAC_SHA384_192_AES256 int32 = 20 + //UNASSIGNED : 21-32770 + GSSAPI int32 = 32771 + //UNASSIGNED : 32772-2147483647 + KERB_CHECKSUM_HMAC_MD5_UNSIGNED uint32 = 4294967158 // 0xFFFFFF76 documentation says this is -138 but in an unsigned int this is 4294967158 + KERB_CHECKSUM_HMAC_MD5 int32 = -138 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go new file mode 100644 index 00000000..0b8e916d --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go @@ -0,0 +1,5 @@ +// Package iana provides Kerberos 5 assigned numbers. +package iana + +// PVNO is the Protocol Version Number. +const PVNO = 5 diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go new file mode 100644 index 00000000..fd756bc5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go @@ -0,0 +1,155 @@ +// Package errorcode provides Kerberos 5 assigned error codes. +package errorcode + +import "fmt" + +// Kerberos error codes. 
+const ( + KDC_ERR_NONE int32 = 0 //No error + KDC_ERR_NAME_EXP int32 = 1 //Client's entry in database has expired + KDC_ERR_SERVICE_EXP int32 = 2 //Server's entry in database has expired + KDC_ERR_BAD_PVNO int32 = 3 //Requested protocol version number not supported + KDC_ERR_C_OLD_MAST_KVNO int32 = 4 //Client's key encrypted in old master key + KDC_ERR_S_OLD_MAST_KVNO int32 = 5 //Server's key encrypted in old master key + KDC_ERR_C_PRINCIPAL_UNKNOWN int32 = 6 //Client not found in Kerberos database + KDC_ERR_S_PRINCIPAL_UNKNOWN int32 = 7 //Server not found in Kerberos database + KDC_ERR_PRINCIPAL_NOT_UNIQUE int32 = 8 //Multiple principal entries in database + KDC_ERR_NULL_KEY int32 = 9 //The client or server has a null key + KDC_ERR_CANNOT_POSTDATE int32 = 10 //Ticket not eligible for postdating + KDC_ERR_NEVER_VALID int32 = 11 //Requested starttime is later than end time + KDC_ERR_POLICY int32 = 12 //KDC policy rejects request + KDC_ERR_BADOPTION int32 = 13 //KDC cannot accommodate requested option + KDC_ERR_ETYPE_NOSUPP int32 = 14 //KDC has no support for encryption type + KDC_ERR_SUMTYPE_NOSUPP int32 = 15 //KDC has no support for checksum type + KDC_ERR_PADATA_TYPE_NOSUPP int32 = 16 //KDC has no support for padata type + KDC_ERR_TRTYPE_NOSUPP int32 = 17 //KDC has no support for transited type + KDC_ERR_CLIENT_REVOKED int32 = 18 //Clients credentials have been revoked + KDC_ERR_SERVICE_REVOKED int32 = 19 //Credentials for server have been revoked + KDC_ERR_TGT_REVOKED int32 = 20 //TGT has been revoked + KDC_ERR_CLIENT_NOTYET int32 = 21 //Client not yet valid; try again later + KDC_ERR_SERVICE_NOTYET int32 = 22 //Server not yet valid; try again later + KDC_ERR_KEY_EXPIRED int32 = 23 //Password has expired; change password to reset + KDC_ERR_PREAUTH_FAILED int32 = 24 //Pre-authentication information was invalid + KDC_ERR_PREAUTH_REQUIRED int32 = 25 //Additional pre-authentication required + KDC_ERR_SERVER_NOMATCH int32 = 26 //Requested server and ticket don't 
match + KDC_ERR_MUST_USE_USER2USER int32 = 27 //Server principal valid for user2user only + KDC_ERR_PATH_NOT_ACCEPTED int32 = 28 //KDC Policy rejects transited path + KDC_ERR_SVC_UNAVAILABLE int32 = 29 //A service is not available + KRB_AP_ERR_BAD_INTEGRITY int32 = 31 //Integrity check on decrypted field failed + KRB_AP_ERR_TKT_EXPIRED int32 = 32 //Ticket expired + KRB_AP_ERR_TKT_NYV int32 = 33 //Ticket not yet valid + KRB_AP_ERR_REPEAT int32 = 34 //Request is a replay + KRB_AP_ERR_NOT_US int32 = 35 //The ticket isn't for us + KRB_AP_ERR_BADMATCH int32 = 36 //Ticket and authenticator don't match + KRB_AP_ERR_SKEW int32 = 37 //Clock skew too great + KRB_AP_ERR_BADADDR int32 = 38 //Incorrect net address + KRB_AP_ERR_BADVERSION int32 = 39 //Protocol version mismatch + KRB_AP_ERR_MSG_TYPE int32 = 40 //Invalid msg type + KRB_AP_ERR_MODIFIED int32 = 41 //Message stream modified + KRB_AP_ERR_BADORDER int32 = 42 //Message out of order + KRB_AP_ERR_BADKEYVER int32 = 44 //Specified version of key is not available + KRB_AP_ERR_NOKEY int32 = 45 //Service key not available + KRB_AP_ERR_MUT_FAIL int32 = 46 //Mutual authentication failed + KRB_AP_ERR_BADDIRECTION int32 = 47 //Incorrect message direction + KRB_AP_ERR_METHOD int32 = 48 //Alternative authentication method required + KRB_AP_ERR_BADSEQ int32 = 49 //Incorrect sequence number in message + KRB_AP_ERR_INAPP_CKSUM int32 = 50 //Inappropriate type of checksum in message + KRB_AP_PATH_NOT_ACCEPTED int32 = 51 //Policy rejects transited path + KRB_ERR_RESPONSE_TOO_BIG int32 = 52 //Response too big for UDP; retry with TCP + KRB_ERR_GENERIC int32 = 60 //Generic error (description in e-text) + KRB_ERR_FIELD_TOOLONG int32 = 61 //Field is too long for this implementation + KDC_ERROR_CLIENT_NOT_TRUSTED int32 = 62 //Reserved for PKINIT + KDC_ERROR_KDC_NOT_TRUSTED int32 = 63 //Reserved for PKINIT + KDC_ERROR_INVALID_SIG int32 = 64 //Reserved for PKINIT + KDC_ERR_KEY_TOO_WEAK int32 = 65 //Reserved for PKINIT + 
KDC_ERR_CERTIFICATE_MISMATCH int32 = 66 //Reserved for PKINIT + KRB_AP_ERR_NO_TGT int32 = 67 //No TGT available to validate USER-TO-USER + KDC_ERR_WRONG_REALM int32 = 68 //Reserved for future use + KRB_AP_ERR_USER_TO_USER_REQUIRED int32 = 69 //Ticket must be for USER-TO-USER + KDC_ERR_CANT_VERIFY_CERTIFICATE int32 = 70 //Reserved for PKINIT + KDC_ERR_INVALID_CERTIFICATE int32 = 71 //Reserved for PKINIT + KDC_ERR_REVOKED_CERTIFICATE int32 = 72 //Reserved for PKINIT + KDC_ERR_REVOCATION_STATUS_UNKNOWN int32 = 73 //Reserved for PKINIT + KDC_ERR_REVOCATION_STATUS_UNAVAILABLE int32 = 74 //Reserved for PKINIT + KDC_ERR_CLIENT_NAME_MISMATCH int32 = 75 //Reserved for PKINIT + KDC_ERR_KDC_NAME_MISMATCH int32 = 76 //Reserved for PKINIT +) + +// Lookup an error code description. +func Lookup(i int32) string { + if s, ok := errorcodeLookup[i]; ok { + return fmt.Sprintf("(%d) %s", i, s) + } + return fmt.Sprintf("Unknown ErrorCode %d", i) +} + +var errorcodeLookup = map[int32]string{ + KDC_ERR_NONE: "KDC_ERR_NONE No error", + KDC_ERR_NAME_EXP: "KDC_ERR_NAME_EXP Client's entry in database has expired", + KDC_ERR_SERVICE_EXP: "KDC_ERR_SERVICE_EXP Server's entry in database has expired", + KDC_ERR_BAD_PVNO: "KDC_ERR_BAD_PVNO Requested protocol version number not supported", + KDC_ERR_C_OLD_MAST_KVNO: "KDC_ERR_C_OLD_MAST_KVNO Client's key encrypted in old master key", + KDC_ERR_S_OLD_MAST_KVNO: "KDC_ERR_S_OLD_MAST_KVNO Server's key encrypted in old master key", + KDC_ERR_C_PRINCIPAL_UNKNOWN: "KDC_ERR_C_PRINCIPAL_UNKNOWN Client not found in Kerberos database", + KDC_ERR_S_PRINCIPAL_UNKNOWN: "KDC_ERR_S_PRINCIPAL_UNKNOWN Server not found in Kerberos database", + KDC_ERR_PRINCIPAL_NOT_UNIQUE: "KDC_ERR_PRINCIPAL_NOT_UNIQUE Multiple principal entries in database", + KDC_ERR_NULL_KEY: "KDC_ERR_NULL_KEY The client or server has a null key", + KDC_ERR_CANNOT_POSTDATE: "KDC_ERR_CANNOT_POSTDATE Ticket not eligible for postdating", + KDC_ERR_NEVER_VALID: "KDC_ERR_NEVER_VALID Requested starttime 
is later than end time", + KDC_ERR_POLICY: "KDC_ERR_POLICY KDC policy rejects request", + KDC_ERR_BADOPTION: "KDC_ERR_BADOPTION KDC cannot accommodate requested option", + KDC_ERR_ETYPE_NOSUPP: "KDC_ERR_ETYPE_NOSUPP KDC has no support for encryption type", + KDC_ERR_SUMTYPE_NOSUPP: "KDC_ERR_SUMTYPE_NOSUPP KDC has no support for checksum type", + KDC_ERR_PADATA_TYPE_NOSUPP: "KDC_ERR_PADATA_TYPE_NOSUPP KDC has no support for padata type", + KDC_ERR_TRTYPE_NOSUPP: "KDC_ERR_TRTYPE_NOSUPP KDC has no support for transited type", + KDC_ERR_CLIENT_REVOKED: "KDC_ERR_CLIENT_REVOKED Clients credentials have been revoked", + KDC_ERR_SERVICE_REVOKED: "KDC_ERR_SERVICE_REVOKED Credentials for server have been revoked", + KDC_ERR_TGT_REVOKED: "KDC_ERR_TGT_REVOKED TGT has been revoked", + KDC_ERR_CLIENT_NOTYET: "KDC_ERR_CLIENT_NOTYET Client not yet valid; try again later", + KDC_ERR_SERVICE_NOTYET: "KDC_ERR_SERVICE_NOTYET Server not yet valid; try again later", + KDC_ERR_KEY_EXPIRED: "KDC_ERR_KEY_EXPIRED Password has expired; change password to reset", + KDC_ERR_PREAUTH_FAILED: "KDC_ERR_PREAUTH_FAILED Pre-authentication information was invalid", + KDC_ERR_PREAUTH_REQUIRED: "KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required", + KDC_ERR_SERVER_NOMATCH: "KDC_ERR_SERVER_NOMATCH Requested server and ticket don't match", + KDC_ERR_MUST_USE_USER2USER: "KDC_ERR_MUST_USE_USER2USER Server principal valid for user2user only", + KDC_ERR_PATH_NOT_ACCEPTED: "KDC_ERR_PATH_NOT_ACCEPTED KDC Policy rejects transited path", + KDC_ERR_SVC_UNAVAILABLE: "KDC_ERR_SVC_UNAVAILABLE A service is not available", + KRB_AP_ERR_BAD_INTEGRITY: "KRB_AP_ERR_BAD_INTEGRITY Integrity check on decrypted field failed", + KRB_AP_ERR_TKT_EXPIRED: "KRB_AP_ERR_TKT_EXPIRED Ticket expired", + KRB_AP_ERR_TKT_NYV: "KRB_AP_ERR_TKT_NYV Ticket not yet valid", + KRB_AP_ERR_REPEAT: "KRB_AP_ERR_REPEAT Request is a replay", + KRB_AP_ERR_NOT_US: "KRB_AP_ERR_NOT_US The ticket isn't for us", + KRB_AP_ERR_BADMATCH: 
"KRB_AP_ERR_BADMATCH Ticket and authenticator don't match", + KRB_AP_ERR_SKEW: "KRB_AP_ERR_SKEW Clock skew too great", + KRB_AP_ERR_BADADDR: "KRB_AP_ERR_BADADDR Incorrect net address", + KRB_AP_ERR_BADVERSION: "KRB_AP_ERR_BADVERSION Protocol version mismatch", + KRB_AP_ERR_MSG_TYPE: "KRB_AP_ERR_MSG_TYPE Invalid msg type", + KRB_AP_ERR_MODIFIED: "KRB_AP_ERR_MODIFIED Message stream modified", + KRB_AP_ERR_BADORDER: "KRB_AP_ERR_BADORDER Message out of order", + KRB_AP_ERR_BADKEYVER: "KRB_AP_ERR_BADKEYVER Specified version of key is not available", + KRB_AP_ERR_NOKEY: "KRB_AP_ERR_NOKEY Service key not available", + KRB_AP_ERR_MUT_FAIL: "KRB_AP_ERR_MUT_FAIL Mutual authentication failed", + KRB_AP_ERR_BADDIRECTION: "KRB_AP_ERR_BADDIRECTION Incorrect message direction", + KRB_AP_ERR_METHOD: "KRB_AP_ERR_METHOD Alternative authentication method required", + KRB_AP_ERR_BADSEQ: "KRB_AP_ERR_BADSEQ Incorrect sequence number in message", + KRB_AP_ERR_INAPP_CKSUM: "KRB_AP_ERR_INAPP_CKSUM Inappropriate type of checksum in message", + KRB_AP_PATH_NOT_ACCEPTED: "KRB_AP_PATH_NOT_ACCEPTED Policy rejects transited path", + KRB_ERR_RESPONSE_TOO_BIG: "KRB_ERR_RESPONSE_TOO_BIG Response too big for UDP; retry with TCP", + KRB_ERR_GENERIC: "KRB_ERR_GENERIC Generic error (description in e-text)", + KRB_ERR_FIELD_TOOLONG: "KRB_ERR_FIELD_TOOLONG Field is too long for this implementation", + KDC_ERROR_CLIENT_NOT_TRUSTED: "KDC_ERROR_CLIENT_NOT_TRUSTED Reserved for PKINIT", + KDC_ERROR_KDC_NOT_TRUSTED: "KDC_ERROR_KDC_NOT_TRUSTED Reserved for PKINIT", + KDC_ERROR_INVALID_SIG: "KDC_ERROR_INVALID_SIG Reserved for PKINIT", + KDC_ERR_KEY_TOO_WEAK: "KDC_ERR_KEY_TOO_WEAK Reserved for PKINIT", + KDC_ERR_CERTIFICATE_MISMATCH: "KDC_ERR_CERTIFICATE_MISMATCH Reserved for PKINIT", + KRB_AP_ERR_NO_TGT: "KRB_AP_ERR_NO_TGT No TGT available to validate USER-TO-USER", + KDC_ERR_WRONG_REALM: "KDC_ERR_WRONG_REALM Reserved for future use", + KRB_AP_ERR_USER_TO_USER_REQUIRED: "KRB_AP_ERR_USER_TO_USER_REQUIRED Ticket 
must be for USER-TO-USER", + KDC_ERR_CANT_VERIFY_CERTIFICATE: "KDC_ERR_CANT_VERIFY_CERTIFICATE Reserved for PKINIT", + KDC_ERR_INVALID_CERTIFICATE: "KDC_ERR_INVALID_CERTIFICATE Reserved for PKINIT", + KDC_ERR_REVOKED_CERTIFICATE: "KDC_ERR_REVOKED_CERTIFICATE Reserved for PKINIT", + KDC_ERR_REVOCATION_STATUS_UNKNOWN: "KDC_ERR_REVOCATION_STATUS_UNKNOWN Reserved for PKINIT", + KDC_ERR_REVOCATION_STATUS_UNAVAILABLE: "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE Reserved for PKINIT", + KDC_ERR_CLIENT_NAME_MISMATCH: "KDC_ERR_CLIENT_NAME_MISMATCH Reserved for PKINIT", + KDC_ERR_KDC_NAME_MISMATCH: "KDC_ERR_KDC_NAME_MISMATCH Reserved for PKINIT", +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go new file mode 100644 index 00000000..46a0d748 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go @@ -0,0 +1,101 @@ +// Package etypeID provides Kerberos 5 encryption type assigned numbers. +package etypeID + +// Kerberos encryption type assigned numbers. 
+const ( + //RESERVED : 0 + DES_CBC_CRC int32 = 1 + DES_CBC_MD4 int32 = 2 + DES_CBC_MD5 int32 = 3 + DES_CBC_RAW int32 = 4 + DES3_CBC_MD5 int32 = 5 + DES3_CBC_RAW int32 = 6 + DES3_CBC_SHA1 int32 = 7 + DES_HMAC_SHA1 int32 = 8 + DSAWITHSHA1_CMSOID int32 = 9 + MD5WITHRSAENCRYPTION_CMSOID int32 = 10 + SHA1WITHRSAENCRYPTION_CMSOID int32 = 11 + RC2CBC_ENVOID int32 = 12 + RSAENCRYPTION_ENVOID int32 = 13 + RSAES_OAEP_ENV_OID int32 = 14 + DES_EDE3_CBC_ENV_OID int32 = 15 + DES3_CBC_SHA1_KD int32 = 16 + AES128_CTS_HMAC_SHA1_96 int32 = 17 + AES256_CTS_HMAC_SHA1_96 int32 = 18 + AES128_CTS_HMAC_SHA256_128 int32 = 19 + AES256_CTS_HMAC_SHA384_192 int32 = 20 + //UNASSIGNED : 21-22 + RC4_HMAC int32 = 23 + RC4_HMAC_EXP int32 = 24 + CAMELLIA128_CTS_CMAC int32 = 25 + CAMELLIA256_CTS_CMAC int32 = 26 + //UNASSIGNED : 27-64 + SUBKEY_KEYMATERIAL int32 = 65 + //UNASSIGNED : 66-2147483647 +) + +// ETypesByName is a map of EncType names to their assigned EncType number. +var ETypesByName = map[string]int32{ + "des-cbc-crc": DES_CBC_CRC, + "des-cbc-md4": DES_CBC_MD4, + "des-cbc-md5": DES_CBC_MD5, + "des-cbc-raw": DES_CBC_RAW, + "des3-cbc-md5": DES3_CBC_MD5, + "des3-cbc-raw": DES3_CBC_RAW, + "des3-cbc-sha1": DES3_CBC_SHA1, + "des3-hmac-sha1": DES_HMAC_SHA1, + "des3-cbc-sha1-kd": DES3_CBC_SHA1_KD, + "des-hmac-sha1": DES_HMAC_SHA1, + "dsaWithSHA1-CmsOID": DSAWITHSHA1_CMSOID, + "md5WithRSAEncryption-CmsOID": MD5WITHRSAENCRYPTION_CMSOID, + "sha1WithRSAEncryption-CmsOID": SHA1WITHRSAENCRYPTION_CMSOID, + "rc2CBC-EnvOID": RC2CBC_ENVOID, + "rsaEncryption-EnvOID": RSAENCRYPTION_ENVOID, + "rsaES-OAEP-ENV-OID": RSAES_OAEP_ENV_OID, + "des-ede3-cbc-Env-OID": DES_EDE3_CBC_ENV_OID, + "aes128-cts-hmac-sha1-96": AES128_CTS_HMAC_SHA1_96, + "aes128-cts": AES128_CTS_HMAC_SHA1_96, + "aes128-sha1": AES128_CTS_HMAC_SHA1_96, + "aes256-cts-hmac-sha1-96": AES256_CTS_HMAC_SHA1_96, + "aes256-cts": AES256_CTS_HMAC_SHA1_96, + "aes256-sha1": AES256_CTS_HMAC_SHA1_96, + "aes128-cts-hmac-sha256-128": AES128_CTS_HMAC_SHA256_128, 
+ "aes128-sha2": AES128_CTS_HMAC_SHA256_128, + "aes256-cts-hmac-sha384-192": AES256_CTS_HMAC_SHA384_192, + "aes256-sha2": AES256_CTS_HMAC_SHA384_192, + "arcfour-hmac": RC4_HMAC, + "rc4-hmac": RC4_HMAC, + "arcfour-hmac-md5": RC4_HMAC, + "arcfour-hmac-exp": RC4_HMAC_EXP, + "rc4-hmac-exp": RC4_HMAC_EXP, + "arcfour-hmac-md5-exp": RC4_HMAC_EXP, + "camellia128-cts-cmac": CAMELLIA128_CTS_CMAC, + "camellia128-cts": CAMELLIA128_CTS_CMAC, + "camellia256-cts-cmac": CAMELLIA256_CTS_CMAC, + "camellia256-cts": CAMELLIA256_CTS_CMAC, + "subkey-keymaterial": SUBKEY_KEYMATERIAL, +} + +// EtypeSupported resolves the etype name string to the etype ID. +// If zero is returned the etype is not supported by gokrb5. +func EtypeSupported(etype string) int32 { + // Slice of supported enctype IDs + s := []int32{ + AES128_CTS_HMAC_SHA1_96, + AES256_CTS_HMAC_SHA1_96, + AES128_CTS_HMAC_SHA256_128, + AES256_CTS_HMAC_SHA384_192, + DES3_CBC_SHA1_KD, + RC4_HMAC, + } + id := ETypesByName[etype] + if id == 0 { + return id + } + for _, sid := range s { + if id == sid { + return id + } + } + return 0 +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go new file mode 100644 index 00000000..787801f8 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go @@ -0,0 +1,36 @@ +// Package flags provides Kerberos 5 flag assigned numbers. +package flags + +// Flag values for KRB5 messages and tickets. +const ( + Reserved = 0 + Forwardable = 1 + Forwarded = 2 + Proxiable = 3 + Proxy = 4 + AllowPostDate = 5 + MayPostDate = 5 + PostDated = 6 + Invalid = 7 + Renewable = 8 + Initial = 9 + PreAuthent = 10 + HWAuthent = 11 + OptHardwareAuth = 11 + RequestAnonymous = 12 + TransitedPolicyChecked = 12 + OKAsDelegate = 13 + EncPARep = 15 + Canonicalize = 15 + DisableTransitedCheck = 26 + RenewableOK = 27 + EncTktInSkey = 28 + Renew = 30 + Validate = 31 + + // AP Option Flags + // 0 Reserved for future use. 
+ APOptionUseSessionKey = 1 + APOptionMutualRequired = 2 + // 3-31 Reserved for future use. +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go new file mode 100644 index 00000000..5b232d1d --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go @@ -0,0 +1,42 @@ +// Package keyusage provides Kerberos 5 key usage assigned numbers. +package keyusage + +// Key usage numbers. +const ( + AS_REQ_PA_ENC_TIMESTAMP = 1 + KDC_REP_TICKET = 2 + AS_REP_ENCPART = 3 + TGS_REQ_KDC_REQ_BODY_AUTHDATA_SESSION_KEY = 4 + TGS_REQ_KDC_REQ_BODY_AUTHDATA_SUB_KEY = 5 + TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM = 6 + TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR = 7 + TGS_REP_ENCPART_SESSION_KEY = 8 + TGS_REP_ENCPART_AUTHENTICATOR_SUB_KEY = 9 + AP_REQ_AUTHENTICATOR_CHKSUM = 10 + AP_REQ_AUTHENTICATOR = 11 + AP_REP_ENCPART = 12 + KRB_PRIV_ENCPART = 13 + KRB_CRED_ENCPART = 14 + KRB_SAFE_CHKSUM = 15 + KERB_NON_KERB_SALT = 16 + KERB_NON_KERB_CKSUM_SALT = 17 + //18. Reserved for future use in Kerberos and related protocols. + AD_KDC_ISSUED_CHKSUM = 19 + //20-21. Reserved for future use in Kerberos and related protocols. + GSSAPI_ACCEPTOR_SEAL = 22 + GSSAPI_ACCEPTOR_SIGN = 23 + GSSAPI_INITIATOR_SEAL = 24 + GSSAPI_INITIATOR_SIGN = 25 + KEY_USAGE_FAST_REQ_CHKSUM = 50 + KEY_USAGE_FAST_ENC = 51 + KEY_USAGE_FAST_REP = 52 + KEY_USAGE_FAST_FINISHED = 53 + KEY_USAGE_ENC_CHALLENGE_CLIENT = 54 + KEY_USAGE_ENC_CHALLENGE_KDC = 55 + KEY_USAGE_AS_REQ = 56 + //26-511. Reserved for future use in Kerberos and related protocols. + //512-1023. Reserved for uses internal to a Kerberos implementation. + //1024. Encryption for application use in protocols that do not specify key usage values + //1025. Checksums for application use in protocols that do not specify key usage values + //1026-2047. Reserved for application use. 
+) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go new file mode 100644 index 00000000..ad21810b --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go @@ -0,0 +1,18 @@ +// Package msgtype provides Kerberos 5 message type assigned numbers. +package msgtype + +// KRB message type IDs. +const ( + KRB_AS_REQ = 10 //Request for initial authentication + KRB_AS_REP = 11 //Response to KRB_AS_REQ request + KRB_TGS_REQ = 12 //Request for authentication based on TGT + KRB_TGS_REP = 13 //Response to KRB_TGS_REQ request + KRB_AP_REQ = 14 //Application request to server + KRB_AP_REP = 15 //Response to KRB_AP_REQ_MUTUAL + KRB_RESERVED16 = 16 //Reserved for user-to-user krb_tgt_request + KRB_RESERVED17 = 17 //Reserved for user-to-user krb_tgt_reply + KRB_SAFE = 20 // Safe (checksummed) application message + KRB_PRIV = 21 // Private (encrypted) application message + KRB_CRED = 22 //Private (encrypted) message to forward credentials + KRB_ERROR = 30 //Error response +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go new file mode 100644 index 00000000..c111a05f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go @@ -0,0 +1,15 @@ +// Package nametype provides Kerberos 5 principal name type numbers. +package nametype + +// Kerberos name type IDs. 
+const ( + KRB_NT_UNKNOWN int32 = 0 //Name type not known + KRB_NT_PRINCIPAL int32 = 1 //Just the name of the principal as in DCE, or for users + KRB_NT_SRV_INST int32 = 2 //Service and other unique instance (krbtgt) + KRB_NT_SRV_HST int32 = 3 //Service with host name as instance (telnet, rcommands) + KRB_NT_SRV_XHST int32 = 4 //Service with host as remaining components + KRB_NT_UID int32 = 5 //Unique ID + KRB_NT_X500_PRINCIPAL int32 = 6 //Encoded X.509 Distinguished name [RFC2253] + KRB_NT_SMTP_NAME int32 = 7 //Name in form of SMTP email name (e.g., user@example.com) + KRB_NT_ENTERPRISE int32 = 10 //Enterprise name; may be mapped to principal name +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go new file mode 100644 index 00000000..aa04f637 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go @@ -0,0 +1,77 @@ +// Package patype provides Kerberos 5 pre-authentication type assigned numbers. +package patype + +// Kerberos pre-authentication type assigned numbers. 
+const ( + PA_TGS_REQ int32 = 1 + PA_ENC_TIMESTAMP int32 = 2 + PA_PW_SALT int32 = 3 + //RESERVED : 4 + PA_ENC_UNIX_TIME int32 = 5 + PA_SANDIA_SECUREID int32 = 6 + PA_SESAME int32 = 7 + PA_OSF_DCE int32 = 8 + PA_CYBERSAFE_SECUREID int32 = 9 + PA_AFS3_SALT int32 = 10 + PA_ETYPE_INFO int32 = 11 + PA_SAM_CHALLENGE int32 = 12 + PA_SAM_RESPONSE int32 = 13 + PA_PK_AS_REQ_OLD int32 = 14 + PA_PK_AS_REP_OLD int32 = 15 + PA_PK_AS_REQ int32 = 16 + PA_PK_AS_REP int32 = 17 + PA_PK_OCSP_RESPONSE int32 = 18 + PA_ETYPE_INFO2 int32 = 19 + PA_USE_SPECIFIED_KVNO int32 = 20 + PA_SVR_REFERRAL_INFO int32 = 20 + PA_SAM_REDIRECT int32 = 21 + PA_GET_FROM_TYPED_DATA int32 = 22 + TD_PADATA int32 = 22 + PA_SAM_ETYPE_INFO int32 = 23 + PA_ALT_PRINC int32 = 24 + PA_SERVER_REFERRAL int32 = 25 + //UNASSIGNED : 26-29 + PA_SAM_CHALLENGE2 int32 = 30 + PA_SAM_RESPONSE2 int32 = 31 + //UNASSIGNED : 32-40 + PA_EXTRA_TGT int32 = 41 + //UNASSIGNED : 42-100 + TD_PKINIT_CMS_CERTIFICATES int32 = 101 + TD_KRB_PRINCIPAL int32 = 102 + TD_KRB_REALM int32 = 103 + TD_TRUSTED_CERTIFIERS int32 = 104 + TD_CERTIFICATE_INDEX int32 = 105 + TD_APP_DEFINED_ERROR int32 = 106 + TD_REQ_NONCE int32 = 107 + TD_REQ_SEQ int32 = 108 + TD_DH_PARAMETERS int32 = 109 + //UNASSIGNED : 110 + TD_CMS_DIGEST_ALGORITHMS int32 = 111 + TD_CERT_DIGEST_ALGORITHMS int32 = 112 + //UNASSIGNED : 113-127 + PA_PAC_REQUEST int32 = 128 + PA_FOR_USER int32 = 129 + PA_FOR_X509_USER int32 = 130 + PA_FOR_CHECK_DUPS int32 = 131 + PA_AS_CHECKSUM int32 = 132 + PA_FX_COOKIE int32 = 133 + PA_AUTHENTICATION_SET int32 = 134 + PA_AUTH_SET_SELECTED int32 = 135 + PA_FX_FAST int32 = 136 + PA_FX_ERROR int32 = 137 + PA_ENCRYPTED_CHALLENGE int32 = 138 + //UNASSIGNED : 139-140 + PA_OTP_CHALLENGE int32 = 141 + PA_OTP_REQUEST int32 = 142 + PA_OTP_CONFIRM int32 = 143 + PA_OTP_PIN_CHANGE int32 = 144 + PA_EPAK_AS_REQ int32 = 145 + PA_EPAK_AS_REP int32 = 146 + PA_PKINIT_KX int32 = 147 + PA_PKU2U_NAME int32 = 148 + PA_REQ_ENC_PA_REP int32 = 149 + PA_AS_FRESHNESS int32 = 150 + 
//UNASSIGNED : 151-164 + PA_SUPPORTED_ETYPES int32 = 165 + PA_EXTENDED_ERROR int32 = 166 +) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go new file mode 100644 index 00000000..2d68eda1 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go @@ -0,0 +1,23 @@ +package kadmin + +import ( + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/types" +) + +// ChangePasswdData is the payload to a password change message. +type ChangePasswdData struct { + NewPasswd []byte `asn1:"explicit,tag:0"` + TargName types.PrincipalName `asn1:"explicit,optional,tag:1"` + TargRealm string `asn1:"generalstring,optional,explicit,tag:2"` +} + +// Marshal ChangePasswdData into a byte slice. +func (c *ChangePasswdData) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*c) + if err != nil { + return []byte{}, err + } + //b = asn1tools.AddASNAppTag(b, asnAppTag.) + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go new file mode 100644 index 00000000..d1864c99 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go @@ -0,0 +1,114 @@ +package kadmin + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + verisonHex = "ff80" +) + +// Request message for changing password. +type Request struct { + APREQ messages.APReq + KRBPriv messages.KRBPriv +} + +// Reply message for a password change. +type Reply struct { + MessageLength int + Version int + APREPLength int + APREP messages.APRep + KRBPriv messages.KRBPriv + KRBError messages.KRBError + IsKRBError bool + ResultCode uint16 + Result string +} + +// Marshal a Request into a byte slice. 
+func (m *Request) Marshal() (b []byte, err error) { + b = []byte{255, 128} // protocol version number: contains the hex constant 0xff80 (big-endian integer). + ab, e := m.APREQ.Marshal() + if e != nil { + err = fmt.Errorf("error marshaling AP_REQ: %v", e) + return + } + if len(ab) > math.MaxUint16 { + err = errors.New("length of AP_REQ greater then max Uint16 size") + return + } + al := make([]byte, 2) + binary.BigEndian.PutUint16(al, uint16(len(ab))) + b = append(b, al...) + b = append(b, ab...) + pb, e := m.KRBPriv.Marshal() + if e != nil { + err = fmt.Errorf("error marshaling KRB_Priv: %v", e) + return + } + b = append(b, pb...) + if len(b)+2 > math.MaxUint16 { + err = errors.New("length of message greater then max Uint16 size") + return + } + ml := make([]byte, 2) + binary.BigEndian.PutUint16(ml, uint16(len(b)+2)) + b = append(ml, b...) + return +} + +// Unmarshal a byte slice into a Reply. +func (m *Reply) Unmarshal(b []byte) error { + m.MessageLength = int(binary.BigEndian.Uint16(b[0:2])) + m.Version = int(binary.BigEndian.Uint16(b[2:4])) + if m.Version != 1 { + return fmt.Errorf("kadmin reply has incorrect protocol version number: %d", m.Version) + } + m.APREPLength = int(binary.BigEndian.Uint16(b[4:6])) + if m.APREPLength != 0 { + err := m.APREP.Unmarshal(b[6 : 6+m.APREPLength]) + if err != nil { + return err + } + err = m.KRBPriv.Unmarshal(b[6+m.APREPLength : m.MessageLength]) + if err != nil { + return err + } + } else { + m.IsKRBError = true + m.KRBError.Unmarshal(b[6:m.MessageLength]) + m.ResultCode, m.Result = parseResponse(m.KRBError.EData) + } + return nil +} + +func parseResponse(b []byte) (c uint16, s string) { + c = binary.BigEndian.Uint16(b[0:2]) + buf := bytes.NewBuffer(b[2:]) + m := make([]byte, len(b)-2) + binary.Read(buf, binary.BigEndian, &m) + s = string(m) + return +} + +// Decrypt the encrypted part of the KRBError within the change password Reply. 
+func (m *Reply) Decrypt(key types.EncryptionKey) error { + if m.IsKRBError { + return m.KRBError + } + err := m.KRBPriv.DecryptEncPart(key) + if err != nil { + return err + } + m.ResultCode, m.Result = parseResponse(m.KRBPriv.DecryptedEncPart.UserData) + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go new file mode 100644 index 00000000..db199bff --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go @@ -0,0 +1,68 @@ +// Package kadmin provides Kerberos administration capabilities. +package kadmin + +import ( + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +// ChangePasswdMsg generate a change password request and also return the key needed to decrypt the reply. +func ChangePasswdMsg(cname types.PrincipalName, realm, password string, tkt messages.Ticket, sessionKey types.EncryptionKey) (r Request, k types.EncryptionKey, err error) { + // Create change password data struct and marshal to bytes + chgpasswd := ChangePasswdData{ + NewPasswd: []byte(password), + TargName: cname, + TargRealm: realm, + } + chpwdb, err := chgpasswd.Marshal() + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error marshaling change passwd data") + return + } + + // Generate authenticator + auth, err := types.NewAuthenticator(realm, cname) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + return + } + etype, err := crypto.GetEtype(sessionKey.KeyType) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey etype") + return + } + err = auth.GenerateSeqNumberAndSubKey(etype.GetETypeID(), etype.GetKeyByteSize()) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey") + return + } + k = auth.SubKey + + // 
Generate AP_REQ + APreq, err := messages.NewAPReq(tkt, sessionKey, auth) + if err != nil { + return + } + + // Form the KRBPriv encpart data + kp := messages.EncKrbPrivPart{ + UserData: chpwdb, + Timestamp: auth.CTime, + Usec: auth.Cusec, + SequenceNumber: auth.SeqNumber, + } + kpriv := messages.NewKRBPriv(kp) + err = kpriv.EncryptEncPart(k) + if err != nil { + err = krberror.Errorf(err, krberror.EncryptingError, "error encrypting change passwd data") + return + } + + r = Request{ + APREQ: APreq, + KRBPriv: kpriv, + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go new file mode 100644 index 00000000..5c2e9d79 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go @@ -0,0 +1,530 @@ +// Package keytab implements Kerberos keytabs: https://web.mit.edu/kerberos/krb5-devel/doc/formats/keytab_file_format.html. +package keytab + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + "unsafe" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + keytabFirstByte byte = 05 +) + +// Keytab struct. +type Keytab struct { + version uint8 + Entries []entry +} + +// Keytab entry struct. +type entry struct { + Principal principal + Timestamp time.Time + KVNO8 uint8 + Key types.EncryptionKey + KVNO uint32 +} + +func (e entry) String() string { + return fmt.Sprintf("% 4d %s %-56s %2d %-64x", + e.KVNO8, + e.Timestamp.Format("02/01/06 15:04:05"), + e.Principal.String(), + e.Key.KeyType, + e.Key.KeyValue, + ) +} + +// Keytab entry principal struct. +type principal struct { + NumComponents int16 `json:"-"` + Realm string + Components []string + NameType int32 +} + +func (p principal) String() string { + return fmt.Sprintf("%s@%s", strings.Join(p.Components, "/"), p.Realm) +} + +// New creates new, empty Keytab type. 
+func New() *Keytab { + var e []entry + return &Keytab{ + version: 2, + Entries: e, + } +} + +// GetEncryptionKey returns the EncryptionKey from the Keytab for the newest entry with the required kvno, etype and matching principal. +// If the kvno is zero then the latest kvno will be returned. The kvno is also returned for +func (kt *Keytab) GetEncryptionKey(princName types.PrincipalName, realm string, kvno int, etype int32) (types.EncryptionKey, int, error) { + var key types.EncryptionKey + var t time.Time + var kv int + for _, k := range kt.Entries { + if k.Principal.Realm == realm && len(k.Principal.Components) == len(princName.NameString) && + k.Key.KeyType == etype && + (k.KVNO == uint32(kvno) || kvno == 0) && + k.Timestamp.After(t) { + p := true + for i, n := range k.Principal.Components { + if princName.NameString[i] != n { + p = false + break + } + } + if p { + key = k.Key + kv = int(k.KVNO) + t = k.Timestamp + } + } + } + if len(key.KeyValue) < 1 { + return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype) + } + return key, kv, nil +} + +// Create a new Keytab entry. +func newEntry() entry { + var b []byte + return entry{ + Principal: newPrincipal(), + Timestamp: time.Time{}, + KVNO8: 0, + Key: types.EncryptionKey{ + KeyType: 0, + KeyValue: b, + }, + KVNO: 0, + } +} + +func (kt Keytab) String() string { + var s string + s = `KVNO Timestamp Principal ET Key +---- ----------------- -------------------------------------------------------- -- ---------------------------------------------------------------- +` + for _, entry := range kt.Entries { + s += entry.String() + "\n" + } + return s +} + +// AddEntry adds an entry to the keytab. The password should be provided in plain text and it will be converted using the defined enctype to be stored. 
+func (kt *Keytab) AddEntry(principalName, realm, password string, ts time.Time, KVNO uint8, encType int32) error { + // Generate a key from the password + princ, _ := types.ParseSPNString(principalName) + key, _, err := crypto.GetKeyFromPassword(password, princ, realm, encType, types.PADataSequence{}) + if err != nil { + return err + } + + // Populate the keytab entry principal + ktep := newPrincipal() + ktep.NumComponents = int16(len(princ.NameString)) + if kt.version == 1 { + ktep.NumComponents += 1 + } + + ktep.Realm = realm + ktep.Components = princ.NameString + ktep.NameType = princ.NameType + + // Populate the keytab entry + e := newEntry() + e.Principal = ktep + e.Timestamp = ts + e.KVNO8 = KVNO + e.KVNO = uint32(KVNO) + e.Key = key + + kt.Entries = append(kt.Entries, e) + return nil +} + +// Create a new principal. +func newPrincipal() principal { + var c []string + return principal{ + NumComponents: 0, + Realm: "", + Components: c, + NameType: 0, + } +} + +// Load a Keytab file into a Keytab type. +func Load(ktPath string) (*Keytab, error) { + kt := new(Keytab) + b, err := ioutil.ReadFile(ktPath) + if err != nil { + return kt, err + } + err = kt.Unmarshal(b) + return kt, err +} + +// Marshal keytab into byte slice +func (kt *Keytab) Marshal() ([]byte, error) { + b := []byte{keytabFirstByte, kt.version} + for _, e := range kt.Entries { + eb, err := e.marshal(int(kt.version)) + if err != nil { + return b, err + } + b = append(b, eb...) + } + return b, nil +} + +// Write the keytab bytes to io.Writer. +// Returns the number of bytes written +func (kt *Keytab) Write(w io.Writer) (int, error) { + b, err := kt.Marshal() + if err != nil { + return 0, fmt.Errorf("error marshaling keytab: %v", err) + } + return w.Write(b) +} + +// Unmarshal byte slice of Keytab data into Keytab type. 
+func (kt *Keytab) Unmarshal(b []byte) error { + if len(b) < 2 { + return fmt.Errorf("byte array is less than 2 bytes: %d", len(b)) + } + + //The first byte of the file always has the value 5 + if b[0] != keytabFirstByte { + return errors.New("invalid keytab data. First byte does not equal 5") + } + //Get keytab version + //The 2nd byte contains the version number (1 or 2) + kt.version = b[1] + if kt.version != 1 && kt.version != 2 { + return errors.New("invalid keytab data. Keytab version is neither 1 nor 2") + } + //Version 1 of the file format uses native byte order for integer representations. Version 2 always uses big-endian byte order + var endian binary.ByteOrder + endian = binary.BigEndian + if kt.version == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + // n tracks position in the byte array + n := 2 + l, err := readInt32(b, &n, &endian) + if err != nil { + return err + } + for l != 0 { + if l < 0 { + //Zero padded so skip over + l = l * -1 + n = n + int(l) + } else { + if n < 0 { + return fmt.Errorf("%d can't be less than zero", n) + } + if n+int(l) > len(b) { + return fmt.Errorf("%s's length is less than %d", b, n+int(l)) + } + eb := b[n : n+int(l)] + n = n + int(l) + ke := newEntry() + // p keeps track as to where we are in the byte stream + var p int + var err error + parsePrincipal(eb, &p, kt, &ke, &endian) + ke.Timestamp, err = readTimestamp(eb, &p, &endian) + if err != nil { + return err + } + rei8, err := readInt8(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO8 = uint8(rei8) + rei16, err := readInt16(eb, &p, &endian) + if err != nil { + return err + } + ke.Key.KeyType = int32(rei16) + rei16, err = readInt16(eb, &p, &endian) + if err != nil { + return err + } + kl := int(rei16) + ke.Key.KeyValue, err = readBytes(eb, &p, kl, &endian) + if err != nil { + return err + } + // The 32-bit key version overrides the 8-bit key version. 
+ // If at least 4 bytes are left after the other fields are read and they are non-zero + // this indicates the 32-bit version is present. + if len(eb)-p >= 4 { + // The 32-bit key may be present + ri32, err := readInt32(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO = uint32(ri32) + } + if ke.KVNO == 0 { + // Handles if the value from the last 4 bytes was zero and also if there are not the 4 bytes present. Makes sense to put the same value here as KVNO8 + ke.KVNO = uint32(ke.KVNO8) + } + // Add the entry to the keytab + kt.Entries = append(kt.Entries, ke) + } + // Check if there are still 4 bytes left to read + // Also check that n is greater than zero + if n < 0 || n > len(b) || len(b[n:]) < 4 { + break + } + // Read the size of the next entry + l, err = readInt32(b, &n, &endian) + if err != nil { + return err + } + } + return nil +} + +func (e entry) marshal(v int) ([]byte, error) { + var b []byte + pb, err := e.Principal.marshal(v) + if err != nil { + return b, err + } + b = append(b, pb...) + + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + + t := make([]byte, 9) + endian.PutUint32(t[0:4], uint32(e.Timestamp.Unix())) + t[4] = e.KVNO8 + endian.PutUint16(t[5:7], uint16(e.Key.KeyType)) + endian.PutUint16(t[7:9], uint16(len(e.Key.KeyValue))) + b = append(b, t...) + + buf := new(bytes.Buffer) + err = binary.Write(buf, endian, e.Key.KeyValue) + if err != nil { + return b, err + } + b = append(b, buf.Bytes()...) + + t = make([]byte, 4) + endian.PutUint32(t, e.KVNO) + b = append(b, t...) + + // Add the length header + t = make([]byte, 4) + endian.PutUint32(t, uint32(len(b))) + b = append(t, b...) + return b, nil +} + +// Parse the Keytab bytes of a principal into a Keytab entry's principal. 
+func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error { + var err error + ke.Principal.NumComponents, err = readInt16(b, p, e) + if err != nil { + return err + } + if kt.version == 1 { + //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 + ke.Principal.NumComponents-- + } + lenRealm, err := readInt16(b, p, e) + if err != nil { + return err + } + realmB, err := readBytes(b, p, int(lenRealm), e) + if err != nil { + return err + } + ke.Principal.Realm = string(realmB) + for i := 0; i < int(ke.Principal.NumComponents); i++ { + l, err := readInt16(b, p, e) + if err != nil { + return err + } + compB, err := readBytes(b, p, int(l), e) + if err != nil { + return err + } + ke.Principal.Components = append(ke.Principal.Components, string(compB)) + } + if kt.version != 1 { + //Name Type is omitted in version 1 + ke.Principal.NameType, err = readInt32(b, p, e) + if err != nil { + return err + } + } + return nil +} + +func (p principal) marshal(v int) ([]byte, error) { + //var b []byte + b := make([]byte, 2) + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + endian.PutUint16(b[0:], uint16(p.NumComponents)) + realm, err := marshalString(p.Realm, v) + if err != nil { + return b, err + } + b = append(b, realm...) + for _, c := range p.Components { + cb, err := marshalString(c, v) + if err != nil { + return b, err + } + b = append(b, cb...) + } + if v != 1 { + t := make([]byte, 4) + endian.PutUint32(t, uint32(p.NameType)) + b = append(b, t...) 
+ } + return b, nil +} + +func marshalString(s string, v int) ([]byte, error) { + sb := []byte(s) + b := make([]byte, 2) + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + endian.PutUint16(b[0:], uint16(len(sb))) + buf := new(bytes.Buffer) + err := binary.Write(buf, endian, sb) + if err != nil { + return b, err + } + b = append(b, buf.Bytes()...) + return b, err +} + +// Read bytes representing a timestamp. +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) (time.Time, error) { + i32, err := readInt32(b, p, e) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(i32), 0), nil +} + +// Read bytes representing an eight bit integer. +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 1) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+1) + } + buf := bytes.NewBuffer(b[*p : *p+1]) + binary.Read(buf, *e, &i) + *p++ + return +} + +// Read bytes representing a sixteen bit integer. +func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 2) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+2) + } + + buf := bytes.NewBuffer(b[*p : *p+2]) + binary.Read(buf, *e, &i) + *p += 2 + return +} + +// Read bytes representing a thirty two bit integer. 
+func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 4) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+4) + } + + buf := bytes.NewBuffer(b[*p : *p+4]) + binary.Read(buf, *e, &i) + *p += 4 + return +} + +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) ([]byte, error) { + if s < 0 { + return nil, fmt.Errorf("%d cannot be less than zero", s) + } + i := *p + s + if i > len(b) { + return nil, fmt.Errorf("%s's length is greater than %d", b, i) + } + buf := bytes.NewBuffer(b[*p:i]) + r := make([]byte, s) + if err := binary.Read(buf, *e, &r); err != nil { + return nil, err + } + *p += s + return r, nil +} + +func isNativeEndianLittle() bool { + var x = 0x012345678 + var p = unsafe.Pointer(&x) + var bp = (*[4]byte)(p) + + var endian bool + if 0x01 == bp[0] { + endian = false + } else if (0x78 & 0xff) == (bp[0] & 0xff) { + endian = true + } else { + // Default to big endian + endian = false + } + return endian +} + +// JSON return information about the keys held in the keytab in a JSON format. +func (kt *Keytab) JSON() (string, error) { + b, err := json.MarshalIndent(kt, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go b/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go new file mode 100644 index 00000000..01c6d990 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go @@ -0,0 +1,67 @@ +// Package krberror provides error type and functions for gokrb5. +package krberror + +import ( + "fmt" + "strings" +) + +// Error type descriptions. 
+const ( + separator = " < " + EncodingError = "Encoding_Error" + NetworkingError = "Networking_Error" + DecryptingError = "Decrypting_Error" + EncryptingError = "Encrypting_Error" + ChksumError = "Checksum_Error" + KRBMsgError = "KRBMessage_Handling_Error" + ConfigError = "Configuration_Error" + KDCError = "KDC_Error" +) + +// Krberror is an error type for gokrb5 +type Krberror struct { + RootCause string + EText []string +} + +// Error function to implement the error interface. +func (e Krberror) Error() string { + return fmt.Sprintf("[Root cause: %s] ", e.RootCause) + strings.Join(e.EText, separator) +} + +// Add another error statement to the error. +func (e *Krberror) Add(et string, s string) { + e.EText = append([]string{fmt.Sprintf("%s: %s", et, s)}, e.EText...) +} + +// New creates a new instance of Krberror. +func New(et, s string) Krberror { + return Krberror{ + RootCause: et, + EText: []string{s}, + } +} + +// Errorf appends to or creates a new Krberror. +func Errorf(err error, et, format string, a ...interface{}) Krberror { + if e, ok := err.(Krberror); ok { + e.Add(et, fmt.Sprintf(format, a...)) + return e + } + return NewErrorf(et, format+": %s", append(a, err)...) +} + +// NewErrorf creates a new Krberror from a formatted string. 
+func NewErrorf(et, format string, a ...interface{}) Krberror { + var s string + if len(a) > 0 { + s = fmt.Sprintf("%s: %s", et, fmt.Sprintf(format, a...)) + } else { + s = fmt.Sprintf("%s: %s", et, format) + } + return Krberror{ + RootCause: et, + EText: []string{s}, + } +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go new file mode 100644 index 00000000..555fb807 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go @@ -0,0 +1,49 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// APRep implements RFC 4120 KRB_AP_REP: https://tools.ietf.org/html/rfc4120#section-5.5.2. +type APRep struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + EncPart types.EncryptedData `asn1:"explicit,tag:2"` +} + +// EncAPRepPart is the encrypted part of KRB_AP_REP. +type EncAPRepPart struct { + CTime time.Time `asn1:"generalized,explicit,tag:0"` + Cusec int `asn1:"explicit,tag:1"` + Subkey types.EncryptionKey `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` +} + +// Unmarshal bytes b into the APRep struct. +func (a *APRep) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_AP_REP + if a.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_AP_REP. Expected: %v; Actual: %v", expectedMsgType, a.MsgType) + } + return nil +} + +// Unmarshal bytes b into the APRep encrypted part struct. 
+func (a *EncAPRepPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncAPRepPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "AP_REP unmarshal error") + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go new file mode 100644 index 00000000..18360797 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go @@ -0,0 +1,199 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalAPReq struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + APOptions asn1.BitString `asn1:"explicit,tag:2"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + Ticket asn1.RawValue `asn1:"explicit,tag:3"` + EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"` +} + +// APReq implements RFC 4120 KRB_AP_REQ: https://tools.ietf.org/html/rfc4120#section-5.5.1. +type APReq struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + APOptions asn1.BitString `asn1:"explicit,tag:2"` + Ticket Ticket `asn1:"explicit,tag:3"` + EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"` + Authenticator types.Authenticator `asn1:"optional"` +} + +// NewAPReq generates a new KRB_AP_REQ struct. 
+func NewAPReq(tkt Ticket, sessionKey types.EncryptionKey, auth types.Authenticator) (APReq, error) { + var a APReq + ed, err := encryptAuthenticator(auth, sessionKey, tkt) + if err != nil { + return a, krberror.Errorf(err, krberror.KRBMsgError, "error creating Authenticator for AP_REQ") + } + a = APReq{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_AP_REQ, + APOptions: types.NewKrbFlags(), + Ticket: tkt, + EncryptedAuthenticator: ed, + } + return a, nil +} + +// Encrypt Authenticator +func encryptAuthenticator(a types.Authenticator, sessionKey types.EncryptionKey, tkt Ticket) (types.EncryptedData, error) { + var ed types.EncryptedData + m, err := a.Marshal() + if err != nil { + return ed, krberror.Errorf(err, krberror.EncodingError, "marshaling error of EncryptedData form of Authenticator") + } + usage := authenticatorKeyUsage(tkt.SName) + ed, err = crypto.GetEncryptedData(m, sessionKey, uint32(usage), tkt.EncPart.KVNO) + if err != nil { + return ed, krberror.Errorf(err, krberror.EncryptingError, "error encrypting Authenticator") + } + return ed, nil +} + +// DecryptAuthenticator decrypts the Authenticator within the AP_REQ. +// sessionKey may simply be the key within the decrypted EncPart of the ticket within the AP_REQ. +func (a *APReq) DecryptAuthenticator(sessionKey types.EncryptionKey) error { + usage := authenticatorKeyUsage(a.Ticket.SName) + ab, e := crypto.DecryptEncPart(a.EncryptedAuthenticator, sessionKey, uint32(usage)) + if e != nil { + return fmt.Errorf("error decrypting authenticator: %v", e) + } + err := a.Authenticator.Unmarshal(ab) + if err != nil { + return fmt.Errorf("error unmarshaling authenticator: %v", err) + } + return nil +} + +func authenticatorKeyUsage(pn types.PrincipalName) int { + if pn.NameString[0] == "krbtgt" { + return keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR + } + return keyusage.AP_REQ_AUTHENTICATOR +} + +// Unmarshal bytes b into the APReq struct. 
+func (a *APReq) Unmarshal(b []byte) error { + var m marshalAPReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "unmarshal error of AP_REQ") + } + if m.MsgType != msgtype.KRB_AP_REQ { + return NewKRBError(types.PrincipalName{}, "", errorcode.KRB_AP_ERR_MSG_TYPE, errorcode.Lookup(errorcode.KRB_AP_ERR_MSG_TYPE)) + } + a.PVNO = m.PVNO + a.MsgType = m.MsgType + a.APOptions = m.APOptions + a.EncryptedAuthenticator = m.EncryptedAuthenticator + a.Ticket, err = unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "unmarshaling error of Ticket within AP_REQ") + } + return nil +} + +// Marshal APReq struct. +func (a *APReq) Marshal() ([]byte, error) { + m := marshalAPReq{ + PVNO: a.PVNO, + MsgType: a.MsgType, + APOptions: a.APOptions, + EncryptedAuthenticator: a.EncryptedAuthenticator, + } + var b []byte + b, err := a.Ticket.Marshal() + if err != nil { + return b, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 3, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AP_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.APREQ) + return mk, nil +} + +// Verify an AP_REQ using service's keytab, spn and max acceptable clock skew duration. +// The service ticket encrypted part and authenticator will be decrypted as part of this operation. +func (a *APReq) Verify(kt *keytab.Keytab, d time.Duration, cAddr types.HostAddress, snameOverride *types.PrincipalName) (bool, error) { + // Decrypt ticket's encrypted part with service key + //TODO decrypt with service's session key from its TGT is use-to-user. Need to figure out how to get TGT. 
+ //if types.IsFlagSet(&a.APOptions, flags.APOptionUseSessionKey) { + // err := a.Ticket.Decrypt(tgt.DecryptedEncPart.Key) + // if err != nil { + // return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of ticket provided using session key") + // } + //} else { + // err := a.Ticket.DecryptEncPart(*kt, &a.Ticket.SName) + // if err != nil { + // return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided") + // } + //} + sname := &a.Ticket.SName + if snameOverride != nil { + sname = snameOverride + } + err := a.Ticket.DecryptEncPart(kt, sname) + if err != nil { + return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided") + } + + // Check time validity of ticket + ok, err := a.Ticket.Valid(d) + if err != nil || !ok { + return ok, err + } + + // Check client's address is listed in the client addresses in the ticket + if len(a.Ticket.DecryptedEncPart.CAddr) > 0 { + //If client addresses are present check if any of them match the source IP that sent the APReq + //If there is no match return KRB_AP_ERR_BADADDR error. 
+ if !types.HostAddressesContains(a.Ticket.DecryptedEncPart.CAddr, cAddr) { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADADDR, "client address not within the list contained in the service ticket") + } + } + + // Decrypt authenticator with session key from ticket's encrypted part + err = a.DecryptAuthenticator(a.Ticket.DecryptedEncPart.Key) + if err != nil { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BAD_INTEGRITY, "could not decrypt authenticator") + } + + // Check CName in authenticator is the same as that in the ticket + if !a.Authenticator.CName.Equal(a.Ticket.DecryptedEncPart.CName) { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADMATCH, "CName in Authenticator does not match that in service ticket") + } + + // Check the clock skew between the client and the service server + ct := a.Authenticator.CTime.Add(time.Duration(a.Authenticator.Cusec) * time.Microsecond) + t := time.Now().UTC() + if t.Sub(ct) > d || ct.Sub(t) > d { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_SKEW, fmt.Sprintf("clock skew with client too large. 
greater than %v seconds", d)) + } + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go new file mode 100644 index 00000000..69df9f0f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go @@ -0,0 +1,360 @@ +package messages + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.4.2 + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalKDCRep struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + PAData types.PADataSequence `asn1:"explicit,optional,tag:2"` + CRealm string `asn1:"generalstring,explicit,tag:3"` + CName types.PrincipalName `asn1:"explicit,tag:4"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + Ticket asn1.RawValue `asn1:"explicit,tag:5"` + EncPart types.EncryptedData `asn1:"explicit,tag:6"` +} + +// KDCRepFields represents the KRB_KDC_REP fields. +type KDCRepFields struct { + PVNO int + MsgType int + PAData []types.PAData + CRealm string + CName types.PrincipalName + Ticket Ticket + EncPart types.EncryptedData + DecryptedEncPart EncKDCRepPart +} + +// ASRep implements RFC 4120 KRB_AS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. +type ASRep struct { + KDCRepFields +} + +// TGSRep implements RFC 4120 KRB_TGS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. 
+type TGSRep struct { + KDCRepFields +} + +// EncKDCRepPart is the encrypted part of KRB_KDC_REP. +type EncKDCRepPart struct { + Key types.EncryptionKey `asn1:"explicit,tag:0"` + LastReqs []LastReq `asn1:"explicit,tag:1"` + Nonce int `asn1:"explicit,tag:2"` + KeyExpiration time.Time `asn1:"generalized,explicit,optional,tag:3"` + Flags asn1.BitString `asn1:"explicit,tag:4"` + AuthTime time.Time `asn1:"generalized,explicit,tag:5"` + StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + EndTime time.Time `asn1:"generalized,explicit,tag:7"` + RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` + SRealm string `asn1:"generalstring,explicit,tag:9"` + SName types.PrincipalName `asn1:"explicit,tag:10"` + CAddr []types.HostAddress `asn1:"explicit,optional,tag:11"` + EncPAData types.PADataSequence `asn1:"explicit,optional,tag:12"` +} + +// LastReq part of KRB_KDC_REP. +type LastReq struct { + LRType int32 `asn1:"explicit,tag:0"` + LRValue time.Time `asn1:"generalized,explicit,tag:1"` +} + +// Unmarshal bytes b into the ASRep struct. +func (k *ASRep) Unmarshal(b []byte) error { + var m marshalKDCRep + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + if m.MsgType != msgtype.KRB_AS_REP { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REP. Expected: %v; Actual: %v", msgtype.KRB_AS_REP, m.MsgType) + } + //Process the raw ticket within + tkt, err := unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within AS_REP") + } + k.KDCRepFields = KDCRepFields{ + PVNO: m.PVNO, + MsgType: m.MsgType, + PAData: m.PAData, + CRealm: m.CRealm, + CName: m.CName, + Ticket: tkt, + EncPart: m.EncPart, + } + return nil +} + +// Marshal ASRep struct. 
+func (k *ASRep) Marshal() ([]byte, error) { + m := marshalKDCRep{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + CRealm: k.CRealm, + CName: k.CName, + EncPart: k.EncPart, + } + b, err := k.Ticket.Marshal() + if err != nil { + return []byte{}, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 5, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REP") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREP) + return mk, nil +} + +// Unmarshal bytes b into the TGSRep struct. +func (k *TGSRep) Unmarshal(b []byte) error { + var m marshalKDCRep + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + if m.MsgType != msgtype.KRB_TGS_REP { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an TGS_REP. Expected: %v; Actual: %v", msgtype.KRB_TGS_REP, m.MsgType) + } + //Process the raw ticket within + tkt, err := unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within TGS_REP") + } + k.KDCRepFields = KDCRepFields{ + PVNO: m.PVNO, + MsgType: m.MsgType, + PAData: m.PAData, + CRealm: m.CRealm, + CName: m.CName, + Ticket: tkt, + EncPart: m.EncPart, + } + return nil +} + +// Marshal TGSRep struct. 
+func (k *TGSRep) Marshal() ([]byte, error) { + m := marshalKDCRep{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + CRealm: k.CRealm, + CName: k.CName, + EncPart: k.EncPart, + } + b, err := k.Ticket.Marshal() + if err != nil { + return []byte{}, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 5, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REP") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREP) + return mk, nil +} + +// Unmarshal bytes b into encrypted part of KRB_KDC_REP. +func (e *EncKDCRepPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncASRepPart)) + if err != nil { + // Try using tag 26 + // Ref: RFC 4120 - mentions that some implementations use application tag number 26 wether or not the reply is + // a AS-REP or a TGS-REP. + _, err = asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncTGSRepPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part within KDC_REP") + } + } + return nil +} + +// Marshal encrypted part of KRB_KDC_REP. +func (e *EncKDCRepPart) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*e) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AS_REP encpart") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncASRepPart) + return b, nil +} + +// DecryptEncPart decrypts the encrypted part of an AS_REP. 
+func (k *ASRep) DecryptEncPart(c *credentials.Credentials) (types.EncryptionKey, error) { + var key types.EncryptionKey + var err error + if c.HasKeytab() { + key, _, err = c.Keytab().GetEncryptionKey(k.CName, k.CRealm, k.EncPart.KVNO, k.EncPart.EType) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + } + if c.HasPassword() { + key, _, err = crypto.GetKeyFromPassword(c.Password(), k.CName, k.CRealm, k.EncPart.EType, k.PAData) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + } + if !c.HasKeytab() && !c.HasPassword() { + return key, krberror.NewErrorf(krberror.DecryptingError, "no secret available in credentials to perform decryption of AS_REP encrypted part") + } + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.AS_REP_ENCPART) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + var denc EncKDCRepPart + err = denc.Unmarshal(b) + if err != nil { + return key, krberror.Errorf(err, krberror.EncodingError, "error unmarshaling decrypted encpart of AS_REP") + } + k.DecryptedEncPart = denc + return key, nil +} + +// Verify checks the validity of AS_REP message. +func (k *ASRep) Verify(cfg *config.Config, creds *credentials.Credentials, asReq ASReq) (bool, error) { + //Ref RFC 4120 Section 3.1.5 + if !k.CName.Equal(asReq.ReqBody.CName) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName) + } + if k.CRealm != asReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CRealm in response does not match what was requested. 
Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.CRealm) + } + key, err := k.DecryptEncPart(creds) + if err != nil { + return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting EncPart of AS_REP") + } + if k.DecryptedEncPart.Nonce != asReq.ReqBody.Nonce { + return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") + } + if !k.DecryptedEncPart.SName.Equal(asReq.ReqBody.SName) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", asReq.ReqBody.SName, k.DecryptedEncPart.SName) + } + if k.DecryptedEncPart.SRealm != asReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) + } + if len(asReq.ReqBody.Addresses) > 0 { + if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, asReq.ReqBody.Addresses) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the AS_REP does not match those listed in the AS_REQ") + } + } + t := time.Now().UTC() + if t.Sub(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(t) > cfg.LibDefaults.Clockskew { + return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. 
Greater than %v seconds", cfg.LibDefaults.Clockskew.Seconds()) + } + // RFC 6806 https://tools.ietf.org/html/rfc6806.html#section-11 + if asReq.PAData.Contains(patype.PA_REQ_ENC_PA_REP) && types.IsFlagSet(&k.DecryptedEncPart.Flags, flags.EncPARep) { + if len(k.DecryptedEncPart.EncPAData) < 2 || !k.DecryptedEncPart.EncPAData.Contains(patype.PA_FX_FAST) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "KDC did not respond appropriately to FAST negotiation") + } + for _, pa := range k.DecryptedEncPart.EncPAData { + if pa.PADataType == patype.PA_REQ_ENC_PA_REP { + var pafast types.PAReqEncPARep + err := pafast.Unmarshal(pa.PADataValue) + if err != nil { + return false, krberror.Errorf(err, krberror.EncodingError, "KDC FAST negotiation response error, could not unmarshal PA_REQ_ENC_PA_REP") + } + etype, err := crypto.GetChksumEtype(pafast.ChksumType) + if err != nil { + return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response error") + } + ab, _ := asReq.Marshal() + if !etype.VerifyChecksum(key.KeyValue, ab, pafast.Chksum, keyusage.KEY_USAGE_AS_REQ) { + return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response checksum invalid") + } + } + } + } + return true, nil +} + +// DecryptEncPart decrypts the encrypted part of an TGS_REP. +func (k *TGSRep) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.TGS_REP_ENCPART_SESSION_KEY) + if err != nil { + return krberror.Errorf(err, krberror.DecryptingError, "error decrypting TGS_REP EncPart") + } + var denc EncKDCRepPart + err = denc.Unmarshal(b) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part") + } + k.DecryptedEncPart = denc + return nil +} + +// Verify checks the validity of the TGS_REP message. 
+func (k *TGSRep) Verify(cfg *config.Config, tgsReq TGSReq) (bool, error) { + if !k.CName.Equal(tgsReq.ReqBody.CName) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName) + } + if k.Ticket.Realm != tgsReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "realm in response ticket does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.Ticket.Realm) + } + if k.DecryptedEncPart.Nonce != tgsReq.ReqBody.Nonce { + return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") + } + //if k.Ticket.SName.NameType != tgsReq.ReqBody.SName.NameType || k.Ticket.SName.NameString == nil { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.Ticket.SName) + //} + //for i := range k.Ticket.SName.NameString { + // if k.Ticket.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.Ticket.SName) + // } + //} + //if k.DecryptedEncPart.SName.NameType != tgsReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) + //} + //for i := range k.DecryptedEncPart.SName.NameString { + // if k.DecryptedEncPart.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. 
Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) + // } + //} + if k.DecryptedEncPart.SRealm != tgsReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) + } + if len(k.DecryptedEncPart.CAddr) > 0 { + if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, tgsReq.ReqBody.Addresses) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the TGS_REP does not match those listed in the TGS_REQ") + } + } + if time.Since(k.DecryptedEncPart.StartTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.StartTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { + if time.Since(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { + return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. 
Greater than %v seconds.", cfg.LibDefaults.Clockskew.Seconds()) + } + } + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go new file mode 100644 index 00000000..3745afed --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go @@ -0,0 +1,432 @@ +package messages + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.4.1 + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/iana/nametype" + "github.com/jcmturner/gokrb5/v8/iana/patype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalKDCReq struct { + PVNO int `asn1:"explicit,tag:1"` + MsgType int `asn1:"explicit,tag:2"` + PAData types.PADataSequence `asn1:"explicit,optional,tag:3"` + ReqBody asn1.RawValue `asn1:"explicit,tag:4"` +} + +// KDCReqFields represents the KRB_KDC_REQ fields. +type KDCReqFields struct { + PVNO int + MsgType int + PAData types.PADataSequence + ReqBody KDCReqBody + Renewal bool +} + +// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. +type ASReq struct { + KDCReqFields +} + +// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. 
+type TGSReq struct { + KDCReqFields +} + +type marshalKDCReqBody struct { + KDCOptions asn1.BitString `asn1:"explicit,tag:0"` + CName types.PrincipalName `asn1:"explicit,optional,tag:1"` + Realm string `asn1:"generalstring,explicit,tag:2"` + SName types.PrincipalName `asn1:"explicit,optional,tag:3"` + From time.Time `asn1:"generalized,explicit,optional,tag:4"` + Till time.Time `asn1:"generalized,explicit,tag:5"` + RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + Nonce int `asn1:"explicit,tag:7"` + EType []int32 `asn1:"explicit,tag:8"` + Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` + EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"` +} + +// KDCReqBody implements the KRB_KDC_REQ request body. +type KDCReqBody struct { + KDCOptions asn1.BitString `asn1:"explicit,tag:0"` + CName types.PrincipalName `asn1:"explicit,optional,tag:1"` + Realm string `asn1:"generalstring,explicit,tag:2"` + SName types.PrincipalName `asn1:"explicit,optional,tag:3"` + From time.Time `asn1:"generalized,explicit,optional,tag:4"` + Till time.Time `asn1:"generalized,explicit,tag:5"` + RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + Nonce int `asn1:"explicit,tag:7"` + EType []int32 `asn1:"explicit,tag:8"` + Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` + EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` + AdditionalTickets []Ticket `asn1:"explicit,optional,tag:11"` +} + +// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request. 
+func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { + sname := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + return NewASReq(realm, c, cname, sname) +} + +// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request. +func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { + sname := types.PrincipalName{ + NameType: nametype.KRB_NT_PRINCIPAL, + NameString: []string{"kadmin", "changepw"}, + } + return NewASReq(realm, c, cname, sname) +} + +// NewASReq generates a new KRB_AS_REQ struct for a given SNAME. +func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) { + nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) + if err != nil { + return ASReq{}, err + } + t := time.Now().UTC() + // Copy the default options to make this thread safe + kopts := types.NewKrbFlags() + copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes) + kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength + a := ASReq{ + KDCReqFields{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_AS_REQ, + PAData: types.PADataSequence{}, + ReqBody: KDCReqBody{ + KDCOptions: kopts, + Realm: realm, + CName: cname, + SName: sname, + Till: t.Add(c.LibDefaults.TicketLifetime), + Nonce: int(nonce.Int64()), + EType: c.LibDefaults.DefaultTktEnctypeIDs, + }, + }, + } + if c.LibDefaults.Forwardable { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable) + } + if c.LibDefaults.Canonicalize { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize) + } + if c.LibDefaults.Proxiable { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable) + } + if c.LibDefaults.RenewLifetime != 0 { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable) + a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) + a.ReqBody.RTime = t.Add(time.Duration(48) * time.Hour) + } + if !c.LibDefaults.NoAddresses { + ha, 
err := types.LocalHostAddresses() + if err != nil { + return a, fmt.Errorf("could not get local addresses: %v", err) + } + ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) + a.ReqBody.Addresses = ha + } + return a, nil +} + +// NewTGSReq generates a new KRB_TGS_REQ struct. +func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) { + a, err := tgsReq(cname, sname, kdcRealm, renewal, c) + if err != nil { + return a, err + } + err = a.setPAData(tgt, sessionKey) + return a, err +} + +// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7) +func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) { + a, err := tgsReq(cname, sname, kdcRealm, renewal, c) + if err != nil { + return a, err + } + a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT} + types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey) + err = a.setPAData(clientTGT, sessionKey) + return a, err +} + +// tgsReq populates the fields for a TGS_REQ +func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) { + nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) + if err != nil { + return TGSReq{}, err + } + t := time.Now().UTC() + k := KDCReqFields{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_TGS_REQ, + ReqBody: KDCReqBody{ + KDCOptions: types.NewKrbFlags(), + Realm: kdcRealm, + CName: cname, // Add the CName to make validation of the reply easier + SName: sname, + Till: t.Add(c.LibDefaults.TicketLifetime), + Nonce: int(nonce.Int64()), + EType: c.LibDefaults.DefaultTGSEnctypeIDs, + }, + Renewal: renewal, + } + if c.LibDefaults.Forwardable { + 
types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable) + } + if c.LibDefaults.Canonicalize { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize) + } + if c.LibDefaults.Proxiable { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable) + } + if c.LibDefaults.RenewLifetime > time.Duration(0) { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable) + k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) + } + if !c.LibDefaults.NoAddresses { + ha, err := types.LocalHostAddresses() + if err != nil { + return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err) + } + ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) + k.ReqBody.Addresses = ha + } + if renewal { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew) + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable) + } + return TGSReq{ + k, + }, nil +} + +func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error { + // Marshal the request and calculate checksum + b, err := k.ReqBody.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body") + } + etype, err := crypto.GetEtype(sessionKey.KeyType) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator") + } + cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM) + if err != nil { + return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash") + } + + // Form PAData for TGS_REQ + // Create authenticator + auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + } + auth.Cksum = types.Checksum{ + CksumType: etype.GetHashID(), + Checksum: cb, + } + // Create AP_REQ + apReq, err := NewAPReq(tgt, sessionKey, auth) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error 
generating new AP_REQ") + } + apb, err := apReq.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data") + } + k.PAData = types.PADataSequence{ + types.PAData{ + PADataType: patype.PA_TGS_REQ, + PADataValue: apb, + }, + } + return nil +} + +// Unmarshal bytes b into the ASReq struct. +func (k *ASReq) Unmarshal(b []byte) error { + var m marshalKDCReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ") + } + expectedMsgType := msgtype.KRB_AS_REQ + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + var reqb KDCReqBody + err = reqb.Unmarshal(m.ReqBody.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body") + } + k.MsgType = m.MsgType + k.PAData = m.PAData + k.PVNO = m.PVNO + k.ReqBody = reqb + return nil +} + +// Unmarshal bytes b into the TGSReq struct. +func (k *TGSReq) Unmarshal(b []byte) error { + var m marshalKDCReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ") + } + expectedMsgType := msgtype.KRB_TGS_REQ + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. 
Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + var reqb KDCReqBody + err = reqb.Unmarshal(m.ReqBody.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body") + } + k.MsgType = m.MsgType + k.PAData = m.PAData + k.PVNO = m.PVNO + k.ReqBody = reqb + return nil +} + +// Unmarshal bytes b into the KRB_KDC_REQ body struct. +func (k *KDCReqBody) Unmarshal(b []byte) error { + var m marshalKDCReqBody + _, err := asn1.Unmarshal(b, &m) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body") + } + k.KDCOptions = m.KDCOptions + if len(k.KDCOptions.Bytes) < 4 { + tb := make([]byte, 4-len(k.KDCOptions.Bytes)) + k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...) + k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8 + } + k.CName = m.CName + k.Realm = m.Realm + k.SName = m.SName + k.From = m.From + k.Till = m.Till + k.RTime = m.RTime + k.Nonce = m.Nonce + k.EType = m.EType + k.Addresses = m.Addresses + k.EncAuthData = m.EncAuthData + if len(m.AdditionalTickets.Bytes) > 0 { + k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets") + } + } + return nil +} + +// Marshal ASReq struct. +func (k *ASReq) Marshal() ([]byte, error) { + m := marshalKDCReq{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + } + b, err := k.ReqBody.Marshal() + if err != nil { + var mk []byte + return mk, err + } + m.ReqBody = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 4, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ) + return mk, nil +} + +// Marshal TGSReq struct. 
+func (k *TGSReq) Marshal() ([]byte, error) { + m := marshalKDCReq{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + } + b, err := k.ReqBody.Marshal() + if err != nil { + var mk []byte + return mk, err + } + m.ReqBody = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 4, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ) + return mk, nil +} + +// Marshal KRB_KDC_REQ body struct. +func (k *KDCReqBody) Marshal() ([]byte, error) { + var b []byte + m := marshalKDCReqBody{ + KDCOptions: k.KDCOptions, + CName: k.CName, + Realm: k.Realm, + SName: k.SName, + From: k.From, + Till: k.Till, + RTime: k.RTime, + Nonce: k.Nonce, + EType: k.EType, + Addresses: k.Addresses, + EncAuthData: k.EncAuthData, + } + rawtkts, err := MarshalTicketSequence(k.AdditionalTickets) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets") + } + //The asn1.rawValue needs the tag setting on it for where it is in the KDCReqBody + rawtkts.Tag = 11 + if len(rawtkts.Bytes) > 0 { + m.AdditionalTickets = rawtkts + } + b, err = asn1.Marshal(m) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body") + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go new file mode 100644 index 00000000..536fdb9e --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go @@ -0,0 +1,102 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + 
"github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +type marshalKRBCred struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + Tickets asn1.RawValue `asn1:"explicit,tag:2"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` +} + +// KRBCred implements RFC 4120 KRB_CRED: https://tools.ietf.org/html/rfc4120#section-5.8.1. +type KRBCred struct { + PVNO int + MsgType int + Tickets []Ticket + EncPart types.EncryptedData + DecryptedEncPart EncKrbCredPart +} + +// EncKrbCredPart is the encrypted part of KRB_CRED. +type EncKrbCredPart struct { + TicketInfo []KrbCredInfo `asn1:"explicit,tag:0"` + Nouce int `asn1:"optional,explicit,tag:1"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:2"` + Usec int `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"optional,explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// KrbCredInfo is the KRB_CRED_INFO part of KRB_CRED. +type KrbCredInfo struct { + Key types.EncryptionKey `asn1:"explicit,tag:0"` + PRealm string `asn1:"generalstring,optional,explicit,tag:1"` + PName types.PrincipalName `asn1:"optional,explicit,tag:2"` + Flags asn1.BitString `asn1:"optional,explicit,tag:3"` + AuthTime time.Time `asn1:"generalized,optional,explicit,tag:4"` + StartTime time.Time `asn1:"generalized,optional,explicit,tag:5"` + EndTime time.Time `asn1:"generalized,optional,explicit,tag:6"` + RenewTill time.Time `asn1:"generalized,optional,explicit,tag:7"` + SRealm string `asn1:"optional,explicit,ia5,tag:8"` + SName types.PrincipalName `asn1:"optional,explicit,tag:9"` + CAddr types.HostAddresses `asn1:"optional,explicit,tag:10"` +} + +// Unmarshal bytes b into the KRBCred struct. 
+func (k *KRBCred) Unmarshal(b []byte) error { + var m marshalKRBCred + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBCred)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_CRED + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_CRED. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + k.PVNO = m.PVNO + k.MsgType = m.MsgType + k.EncPart = m.EncPart + if len(m.Tickets.Bytes) > 0 { + k.Tickets, err = unmarshalTicketsSequence(m.Tickets) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling tickets within KRB_CRED") + } + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of a KRB_CRED. +func (k *KRBCred) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_CRED_ENCPART) + if err != nil { + return krberror.Errorf(err, krberror.DecryptingError, "error decrypting KRB_CRED EncPart") + } + var denc EncKrbCredPart + err = denc.Unmarshal(b) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part of KRB_CRED") + } + k.DecryptedEncPart = denc + return nil +} + +// Unmarshal bytes b into the encrypted part of KRB_CRED. 
+func (k *EncKrbCredPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbCredPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling EncKrbCredPart") + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go new file mode 100644 index 00000000..d2cf32d6 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go @@ -0,0 +1,94 @@ +// Package messages implements Kerberos 5 message types and methods. +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1. +type KRBError struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + CTime time.Time `asn1:"generalized,optional,explicit,tag:2"` + Cusec int `asn1:"optional,explicit,tag:3"` + STime time.Time `asn1:"generalized,explicit,tag:4"` + Susec int `asn1:"explicit,tag:5"` + ErrorCode int32 `asn1:"explicit,tag:6"` + CRealm string `asn1:"generalstring,optional,explicit,tag:7"` + CName types.PrincipalName `asn1:"optional,explicit,tag:8"` + Realm string `asn1:"generalstring,explicit,tag:9"` + SName types.PrincipalName `asn1:"explicit,tag:10"` + EText string `asn1:"generalstring,optional,explicit,tag:11"` + EData []byte `asn1:"optional,explicit,tag:12"` +} + +// NewKRBError creates a new KRBError. 
+func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError { + t := time.Now().UTC() + return KRBError{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_ERROR, + STime: t, + Susec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), + ErrorCode: code, + SName: sname, + Realm: realm, + EText: etext, + } +} + +// Unmarshal bytes b into the KRBError struct. +func (k *KRBError) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error") + } + expectedMsgType := msgtype.KRB_ERROR + if k.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) + } + return nil +} + +// Marshal a KRBError into bytes. +func (k *KRBError) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*k) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error marshaling KRBError") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.KRBError) + return b, nil +} + +// Error method implementing error interface on KRBError struct. 
+func (k KRBError) Error() string { + etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode)) + if k.EText != "" { + etxt = fmt.Sprintf("%s - %s", etxt, k.EText) + } + return etxt +} + +func processUnmarshalReplyError(b []byte, err error) error { + switch err.(type) { + case asn1.StructuralError: + var krberr KRBError + tmperr := krberr.Unmarshal(b) + if tmperr != nil { + return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") + } + return krberr + default: + return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") + } +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go new file mode 100644 index 00000000..0ca61494 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go @@ -0,0 +1,108 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// KRBPriv implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.7.1. +type KRBPriv struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` + DecryptedEncPart EncKrbPrivPart `asn1:"optional,omitempty"` // Not part of ASN1 bytes so marked as optional so unmarshalling works +} + +// EncKrbPrivPart is the encrypted part of KRB_PRIV. 
+type EncKrbPrivPart struct { + UserData []byte `asn1:"explicit,tag:0"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` + Usec int `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// NewKRBPriv returns a new KRBPriv type. +func NewKRBPriv(part EncKrbPrivPart) KRBPriv { + return KRBPriv{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_PRIV, + DecryptedEncPart: part, + } +} + +// Unmarshal bytes b into the KRBPriv struct. +func (k *KRBPriv) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBPriv)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_PRIV + if k.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_PRIV. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) + } + return nil +} + +// Unmarshal bytes b into the EncKrbPrivPart struct. +func (k *EncKrbPrivPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbPrivPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "KRB_PRIV unmarshal error") + } + return nil +} + +// Marshal the KRBPriv. +func (k *KRBPriv) Marshal() ([]byte, error) { + tk := KRBPriv{ + PVNO: k.PVNO, + MsgType: k.MsgType, + EncPart: k.EncPart, + } + b, err := asn1.Marshal(tk) + if err != nil { + return []byte{}, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.KRBPriv) + return b, nil +} + +// EncryptEncPart encrypts the DecryptedEncPart within the KRBPriv. +// Use to prepare for marshaling. 
+func (k *KRBPriv) EncryptEncPart(key types.EncryptionKey) error { + b, err := asn1.Marshal(k.DecryptedEncPart) + if err != nil { + return err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncKrbPrivPart) + k.EncPart, err = crypto.GetEncryptedData(b, key, keyusage.KRB_PRIV_ENCPART, 1) + if err != nil { + return err + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of the KRBPriv message. +func (k *KRBPriv) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_PRIV_ENCPART) + if err != nil { + return fmt.Errorf("error decrypting KRBPriv EncPart: %v", err) + } + err = k.DecryptedEncPart.Unmarshal(b) + if err != nil { + return fmt.Errorf("error unmarshaling encrypted part: %v", err) + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go new file mode 100644 index 00000000..52cd2844 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go @@ -0,0 +1,43 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/msgtype" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/types" +) + +// KRBSafe implements RFC 4120 KRB_SAFE: https://tools.ietf.org/html/rfc4120#section-5.6.1. +type KRBSafe struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + SafeBody KRBSafeBody `asn1:"explicit,tag:2"` + Cksum types.Checksum `asn1:"explicit,tag:3"` +} + +// KRBSafeBody implements the KRB_SAFE_BODY of KRB_SAFE. 
+type KRBSafeBody struct { + UserData []byte `asn1:"explicit,tag:0"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` + Usec int `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// Unmarshal bytes b into the KRBSafe struct. +func (s *KRBSafe) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, s, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBSafe)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_SAFE + if s.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_SAFE. Expected: %v; Actual: %v", expectedMsgType, s.MsgType) + } + return nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go new file mode 100644 index 00000000..11efad62 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go @@ -0,0 +1,262 @@ +package messages + +import ( + "fmt" + "log" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana" + "github.com/jcmturner/gokrb5/v8/iana/adtype" + "github.com/jcmturner/gokrb5/v8/iana/asnAppTag" + "github.com/jcmturner/gokrb5/v8/iana/errorcode" + "github.com/jcmturner/gokrb5/v8/iana/flags" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/krberror" + "github.com/jcmturner/gokrb5/v8/pac" + "github.com/jcmturner/gokrb5/v8/types" +) + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.3 + +// Ticket implements the Kerberos ticket. 
+type Ticket struct { + TktVNO int `asn1:"explicit,tag:0"` + Realm string `asn1:"generalstring,explicit,tag:1"` + SName types.PrincipalName `asn1:"explicit,tag:2"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` + DecryptedEncPart EncTicketPart `asn1:"optional"` // Not part of ASN1 bytes so marked as optional so unmarshalling works +} + +// EncTicketPart is the encrypted part of the Ticket. +type EncTicketPart struct { + Flags asn1.BitString `asn1:"explicit,tag:0"` + Key types.EncryptionKey `asn1:"explicit,tag:1"` + CRealm string `asn1:"generalstring,explicit,tag:2"` + CName types.PrincipalName `asn1:"explicit,tag:3"` + Transited TransitedEncoding `asn1:"explicit,tag:4"` + AuthTime time.Time `asn1:"generalized,explicit,tag:5"` + StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + EndTime time.Time `asn1:"generalized,explicit,tag:7"` + RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` + CAddr types.HostAddresses `asn1:"explicit,optional,tag:9"` + AuthorizationData types.AuthorizationData `asn1:"explicit,optional,tag:10"` +} + +// TransitedEncoding part of the ticket's encrypted part. +type TransitedEncoding struct { + TRType int32 `asn1:"explicit,tag:0"` + Contents []byte `asn1:"explicit,tag:1"` +} + +// NewTicket creates a new Ticket instance. 
+func NewTicket(cname types.PrincipalName, crealm string, sname types.PrincipalName, srealm string, flags asn1.BitString, sktab *keytab.Keytab, eTypeID int32, kvno int, authTime, startTime, endTime, renewTill time.Time) (Ticket, types.EncryptionKey, error) { + etype, err := crypto.GetEtype(eTypeID) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting etype for new ticket") + } + sessionKey, err := types.GenerateEncryptionKey(etype) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error generating session key") + } + + etp := EncTicketPart{ + Flags: flags, + Key: sessionKey, + CRealm: crealm, + CName: cname, + Transited: TransitedEncoding{}, + AuthTime: authTime, + StartTime: startTime, + EndTime: endTime, + RenewTill: renewTill, + } + b, err := asn1.Marshal(etp) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncodingError, "error marshalling ticket encpart") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncTicketPart) + skey, _, err := sktab.GetEncryptionKey(sname, srealm, kvno, eTypeID) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting encryption key for new ticket") + } + ed, err := crypto.GetEncryptedData(b, skey, keyusage.KDC_REP_TICKET, kvno) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error encrypting ticket encpart") + } + tkt := Ticket{ + TktVNO: iana.PVNO, + Realm: srealm, + SName: sname, + EncPart: ed, + } + return tkt, sessionKey, nil +} + +// Unmarshal bytes b into a Ticket struct. +func (t *Ticket) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket)) + return err +} + +// Marshal the Ticket. 
+func (t *Ticket) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*t) + if err != nil { + return nil, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.Ticket) + return b, nil +} + +// Unmarshal bytes b into the EncTicketPart struct. +func (t *EncTicketPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.EncTicketPart)) + return err +} + +// unmarshalTicket returns a ticket from the bytes provided. +func unmarshalTicket(b []byte) (t Ticket, err error) { + err = t.Unmarshal(b) + return +} + +// unmarshalTicketsSequence returns a slice of Tickets from a raw ASN1 value. +func unmarshalTicketsSequence(in asn1.RawValue) ([]Ticket, error) { + //This is a workaround to a asn1 decoding issue in golang - https://github.com/golang/go/issues/17321. It's not pretty I'm afraid + //We pull out raw values from the larger raw value (that is actually the data of the sequence of raw values) and track our position moving along the data. + b := in.Bytes + // Ignore the head of the asn1 stream (1 byte for tag and those for the length) as this is what tells us its a sequence but we're handling it ourselves + p := 1 + asn1tools.GetNumberBytesInLengthHeader(in.Bytes) + var tkts []Ticket + var raw asn1.RawValue + for p < (len(b)) { + _, err := asn1.UnmarshalWithParams(b[p:], &raw, fmt.Sprintf("application,tag:%d", asnAppTag.Ticket)) + if err != nil { + return nil, fmt.Errorf("unmarshaling sequence of tickets failed getting length of ticket: %v", err) + } + t, err := unmarshalTicket(b[p:]) + if err != nil { + return nil, fmt.Errorf("unmarshaling sequence of tickets failed: %v", err) + } + p += len(raw.FullBytes) + tkts = append(tkts, t) + } + MarshalTicketSequence(tkts) // NOTE(review): return value and error are discarded — this call looks like dead code; confirm and remove upstream. + return tkts, nil +} + +// MarshalTicketSequence marshals a slice of Tickets returning an ASN1 raw value containing the ticket sequence.
+func MarshalTicketSequence(tkts []Ticket) (asn1.RawValue, error) { + raw := asn1.RawValue{ + Class: 2, + IsCompound: true, + } + if len(tkts) < 1 { + // There are no tickets to marshal + return raw, nil + } + var btkts []byte + for i, t := range tkts { + b, err := t.Marshal() + if err != nil { + return raw, fmt.Errorf("error marshaling ticket number %d in sequence of tickets", i+1) + } + btkts = append(btkts, b...) + } + // The ASN1 wrapping consists of 2 bytes: + // 1st byte -> Identifier Octet - In this case an OCTET STRING (ASN TAG + // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. + // Application Tag: + //| Byte: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | + //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | + //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | + btkts = append(asn1tools.MarshalLengthBytes(len(btkts)), btkts...) + btkts = append([]byte{byte(32 + asn1.TagSequence)}, btkts...) + raw.Bytes = btkts + // If we need to create the full bytes then identifier octet is "context-specific" = 128 + "constructed" + 32 + the wrapping explicit tag (11) + //fmt.Fprintf(os.Stderr, "mRaw fb: %v\n", raw.FullBytes) + return raw, nil +} + +// DecryptEncPart decrypts the encrypted part of the ticket. +// The sname argument can be used to specify which service principal's key should be used to decrypt the ticket. +// If nil is passed as the sname then the service principal specified within the ticket it used. 
+func (t *Ticket) DecryptEncPart(keytab *keytab.Keytab, sname *types.PrincipalName) error { + if sname == nil { + sname = &t.SName + } + key, _, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType) + if err != nil { + return NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) + } + return t.Decrypt(key) +} + +// Decrypt decrypts the encrypted part of the ticket using the key provided. +func (t *Ticket) Decrypt(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(t.EncPart, key, keyusage.KDC_REP_TICKET) + if err != nil { + return fmt.Errorf("error decrypting Ticket EncPart: %v", err) + } + var denc EncTicketPart + err = denc.Unmarshal(b) + if err != nil { + return fmt.Errorf("error unmarshaling encrypted part: %v", err) + } + t.DecryptedEncPart = denc + return nil +} + +// GetPACType returns a Microsoft PAC that has been extracted from the ticket and processed. +func (t *Ticket) GetPACType(keytab *keytab.Keytab, sname *types.PrincipalName, l *log.Logger) (bool, pac.PACType, error) { + var isPAC bool + for _, ad := range t.DecryptedEncPart.AuthorizationData { + if ad.ADType == adtype.ADIfRelevant { + var ad2 types.AuthorizationData + err := ad2.Unmarshal(ad.ADData) + if err != nil { + l.Printf("PAC authorization data could not be unmarshaled: %v", err) + continue + } + if ad2[0].ADType == adtype.ADWin2KPAC { + isPAC = true + var p pac.PACType + err = p.Unmarshal(ad2[0].ADData) + if err != nil { + return isPAC, p, fmt.Errorf("error unmarshaling PAC: %v", err) + } + if sname == nil { + sname = &t.SName + } + key, _, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType) + if err != nil { + return isPAC, p, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) + } + err = p.ProcessPACInfoBuffers(key, l) + return isPAC, p, err + } + } + } + return isPAC, pac.PACType{}, nil +} + +// 
Valid checks if the ticket is currently valid. The d argument is the maximum tolerated duration (clock-skew allowance) when comparing against the ticket's start and end times. +func (t *Ticket) Valid(d time.Duration) (bool, error) { + // Check for future tickets or invalid tickets + time := time.Now().UTC() + if t.DecryptedEncPart.StartTime.Sub(time) > d || types.IsFlagSet(&t.DecryptedEncPart.Flags, flags.Invalid) { + return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_NYV, "service ticket provided is not yet valid") + } + + // Check for expired ticket + if time.Sub(t.DecryptedEncPart.EndTime) > d { + return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_EXPIRED, "service ticket provided has expired") + } + + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go new file mode 100644 index 00000000..36871e00 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go @@ -0,0 +1,34 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx + +// ClientClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh536365.aspx +type ClientClaimsInfo struct { + ClaimsSetMetadata mstypes.ClaimsSetMetadata + ClaimsSet mstypes.ClaimsSet +} + +// Unmarshal bytes into the ClientClaimsInfo struct +func (k *ClientClaimsInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + m := new(mstypes.ClaimsSetMetadata) + err = dec.Decode(m) + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err) + return + } + k.ClaimsSetMetadata = *m + k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet() + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go
b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go new file mode 100644 index 00000000..ddd95780 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go @@ -0,0 +1,31 @@ +package pac + +import ( + "bytes" + + "github.com/jcmturner/rpc/v2/mstypes" +) + +// ClientInfo implements https://msdn.microsoft.com/en-us/library/cc237951.aspx +type ClientInfo struct { + ClientID mstypes.FileTime // A FILETIME structure in little-endian format that contains the Kerberos initial ticket-granting ticket TGT authentication time + NameLength uint16 // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the Name field. + Name string // An array of 16-bit Unicode characters in little-endian format that contains the client's account name. +} + +// Unmarshal bytes into the ClientInfo struct +func (k *ClientInfo) Unmarshal(b []byte) (err error) { + //The PAC_CLIENT_INFO structure is a simple structure that is not NDR-encoded. + r := mstypes.NewReader(bytes.NewReader(b)) + + k.ClientID, err = r.FileTime() + if err != nil { + return + } + k.NameLength, err = r.Uint16() + if err != nil { + return + } + k.Name, err = r.UTF16String(int(k.NameLength)) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go new file mode 100644 index 00000000..0c7ccd42 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go @@ -0,0 +1,86 @@ +package pac + +import ( + "bytes" + "errors" + "fmt" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// https://msdn.microsoft.com/en-us/library/cc237931.aspx + +// CredentialsInfo implements https://msdn.microsoft.com/en-us/library/cc237953.aspx +type CredentialsInfo struct { + Version uint32 // A 32-bit unsigned integer 
in little-endian format that defines the version. MUST be 0x00000000. + EType uint32 + PACCredentialDataEncrypted []byte // Key usage number for encryption: KERB_NON_KERB_SALT (16) + PACCredentialData CredentialData +} + +// Unmarshal bytes into the CredentialsInfo struct +func (c *CredentialsInfo) Unmarshal(b []byte, k types.EncryptionKey) (err error) { + //The CredentialsInfo structure is a simple structure that is not NDR-encoded. + r := mstypes.NewReader(bytes.NewReader(b)) + + c.Version, err = r.Uint32() + if err != nil { + return + } + if c.Version != 0 { + err = errors.New("credentials info version is not zero") + return + } + c.EType, err = r.Uint32() + if err != nil { + return + } + c.PACCredentialDataEncrypted, err = r.ReadBytes(len(b) - 8) + if err != nil { + err = fmt.Errorf("error reading PAC Credentials Data: %v", err) + return + } + + err = c.DecryptEncPart(k) + if err != nil { + err = fmt.Errorf("error decrypting PAC Credentials Data: %v", err) + return + } + return +} + +// DecryptEncPart decrypts the encrypted part of the CredentialsInfo. +func (c *CredentialsInfo) DecryptEncPart(k types.EncryptionKey) error { + if k.KeyType != int32(c.EType) { + return fmt.Errorf("key provided is not the correct type. Type needed: %d, type provided: %d", c.EType, k.KeyType) + } + pt, err := crypto.DecryptMessage(c.PACCredentialDataEncrypted, k, keyusage.KERB_NON_KERB_SALT) + if err != nil { + return err + } + err = c.PACCredentialData.Unmarshal(pt) + if err != nil { + return err + } + return nil +} + +// CredentialData implements https://msdn.microsoft.com/en-us/library/cc237952.aspx +type CredentialData struct { + CredentialCount uint32 + Credentials []SECPKGSupplementalCred // Size is the value of CredentialCount +} + +// Unmarshal converts the bytes provided into a CredentialData type.
+func (c *CredentialData) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(c) + if err != nil { + err = fmt.Errorf("error unmarshaling CredentialData: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go new file mode 100644 index 00000000..6eb29260 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go @@ -0,0 +1,34 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx + +// DeviceClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh554226.aspx +type DeviceClaimsInfo struct { + ClaimsSetMetadata mstypes.ClaimsSetMetadata + ClaimsSet mstypes.ClaimsSet +} + +// Unmarshal bytes into the DeviceClaimsInfo struct +func (k *DeviceClaimsInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + m := new(mstypes.ClaimsSetMetadata) + err = dec.Decode(m) + if err != nil { + err = fmt.Errorf("error unmarshaling DeviceClaimsInfo ClaimsSetMetadata: %v", err) + return + } + k.ClaimsSetMetadata = *m + k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet() + if err != nil { + err = fmt.Errorf("error unmarshaling DeviceClaimsInfo ClaimsSet: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go new file mode 100644 index 00000000..ce82daa5 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go @@ -0,0 +1,32 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// DeviceInfo implements https://msdn.microsoft.com/en-us/library/hh536402.aspx +type DeviceInfo struct { + UserID uint32 // A 32-bit unsigned integer that contains the
RID of the account. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. + PrimaryGroupID uint32 // A 32-bit unsigned integer that contains the RID for the primary group to which this account belongs. + AccountDomainID mstypes.RPCSID `ndr:"pointer"` // A SID structure that contains the SID for the domain of the account.This member is used in conjunction with the UserId, and GroupIds members to create the user and group SIDs for the client. + AccountGroupCount uint32 // A 32-bit unsigned integer that contains the number of groups within the account domain to which the account belongs + AccountGroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` // A pointer to a list of GROUP_MEMBERSHIP (section 2.2.2) structures that contains the groups to which the account belongs in the account domain. The number of groups in this list MUST be equal to GroupCount. + SIDCount uint32 // A 32-bit unsigned integer that contains the total number of SIDs present in the ExtraSids member. + ExtraSIDs []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"` // A pointer to a list of KERB_SID_AND_ATTRIBUTES structures that contain a list of SIDs corresponding to groups not in domains. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. + DomainGroupCount uint32 // A 32-bit unsigned integer that contains the number of domains with groups to which the account belongs. + DomainGroup []mstypes.DomainGroupMembership `ndr:"pointer,conformant"` // A pointer to a list of DOMAIN_GROUP_MEMBERSHIP structures (section 2.2.3) that contains the domains to which the account belongs to a group. The number of sets in this list MUST be equal to DomainCount. 
+} + +// Unmarshal bytes into the DeviceInfo struct +func (k *DeviceInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling DeviceInfo: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go new file mode 100644 index 00000000..dde78614 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go @@ -0,0 +1,110 @@ +// Package pac implements Microsoft Privilege Attribute Certificate (PAC) processing. +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +// KERB_VALIDATION_INFO flags. +const ( + USERFLAG_GUEST = 31 // Authentication was done via the GUEST account; no password was used. + USERFLAG_NO_ENCRYPTION_AVAILABLE = 30 // No encryption is available. + USERFLAG_LAN_MANAGER_KEY = 28 // LAN Manager key was used for authentication. + USERFLAG_SUB_AUTH = 25 // Sub-authentication used; session key came from the sub-authentication package. + USERFLAG_EXTRA_SIDS = 26 // Indicates that the ExtraSids field is populated and contains additional SIDs. + USERFLAG_MACHINE_ACCOUNT = 24 // Indicates that the account is a machine account. + USERFLAG_DC_NTLM2 = 23 // Indicates that the domain controller understands NTLMv2. + USERFLAG_RESOURCE_GROUPIDS = 22 // Indicates that the ResourceGroupIds field is populated. + USERFLAG_PROFILEPATH = 21 // Indicates that ProfilePath is populated. + USERFLAG_NTLM2_NTCHALLENGERESP = 20 // The NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. + USERFLAG_LM2_LMCHALLENGERESP = 19 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. 
+ USERFLAG_AUTH_LMCHALLENGERESP_KEY_NTCHALLENGERESP = 18 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and the NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used session key generation. +) + +// KerbValidationInfo implement https://msdn.microsoft.com/en-us/library/cc237948.aspx +type KerbValidationInfo struct { + LogOnTime mstypes.FileTime + LogOffTime mstypes.FileTime + KickOffTime mstypes.FileTime + PasswordLastSet mstypes.FileTime + PasswordCanChange mstypes.FileTime + PasswordMustChange mstypes.FileTime + EffectiveName mstypes.RPCUnicodeString + FullName mstypes.RPCUnicodeString + LogonScript mstypes.RPCUnicodeString + ProfilePath mstypes.RPCUnicodeString + HomeDirectory mstypes.RPCUnicodeString + HomeDirectoryDrive mstypes.RPCUnicodeString + LogonCount uint16 + BadPasswordCount uint16 + UserID uint32 + PrimaryGroupID uint32 + GroupCount uint32 + GroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` + UserFlags uint32 + UserSessionKey mstypes.UserSessionKey + LogonServer mstypes.RPCUnicodeString + LogonDomainName mstypes.RPCUnicodeString + LogonDomainID mstypes.RPCSID `ndr:"pointer"` + Reserved1 [2]uint32 // Has 2 elements + UserAccountControl uint32 + SubAuthStatus uint32 + LastSuccessfulILogon mstypes.FileTime + LastFailedILogon mstypes.FileTime + FailedILogonCount uint32 + Reserved3 uint32 + SIDCount uint32 + ExtraSIDs []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"` + ResourceGroupDomainSID mstypes.RPCSID `ndr:"pointer"` + ResourceGroupCount uint32 + ResourceGroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` +} + +// Unmarshal bytes into the DeviceInfo struct +func (k *KerbValidationInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err) + } + return +} + +// GetGroupMembershipSIDs returns a 
slice of strings containing the group membership SIDs found in the PAC. +func (k *KerbValidationInfo) GetGroupMembershipSIDs() []string { + var g []string + lSID := k.LogonDomainID.String() + for i := range k.GroupIDs { + g = append(g, fmt.Sprintf("%s-%d", lSID, k.GroupIDs[i].RelativeID)) + } + for _, s := range k.ExtraSIDs { + var exists = false + for _, es := range g { + if es == s.SID.String() { + exists = true + break + } + } + if !exists { + g = append(g, s.SID.String()) + } + } + for _, r := range k.ResourceGroupIDs { + var exists = false + s := fmt.Sprintf("%s-%d", k.ResourceGroupDomainSID.String(), r.RelativeID) + for _, es := range g { + if es == s { + exists = true + break + } + } + if !exists { + g = append(g, s) + } + } + return g +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go new file mode 100644 index 00000000..fab2ad7c --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go @@ -0,0 +1,251 @@ +package pac + +import ( + "bytes" + "errors" + "fmt" + "log" + + "github.com/jcmturner/gokrb5/v8/crypto" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/types" + "github.com/jcmturner/rpc/v2/mstypes" +) + +const ( + infoTypeKerbValidationInfo uint32 = 1 + infoTypeCredentials uint32 = 2 + infoTypePACServerSignatureData uint32 = 6 + infoTypePACKDCSignatureData uint32 = 7 + infoTypePACClientInfo uint32 = 10 + infoTypeS4UDelegationInfo uint32 = 11 + infoTypeUPNDNSInfo uint32 = 12 + infoTypePACClientClaimsInfo uint32 = 13 + infoTypePACDeviceInfo uint32 = 14 + infoTypePACDeviceClaimsInfo uint32 = 15 +) + +// PACType implements: https://msdn.microsoft.com/en-us/library/cc237950.aspx +type PACType struct { + CBuffers uint32 + Version uint32 + Buffers []InfoBuffer + Data []byte + KerbValidationInfo *KerbValidationInfo + CredentialsInfo *CredentialsInfo + ServerChecksum *SignatureData + KDCChecksum *SignatureData + ClientInfo *ClientInfo + 
S4UDelegationInfo *S4UDelegationInfo + UPNDNSInfo *UPNDNSInfo + ClientClaimsInfo *ClientClaimsInfo + DeviceInfo *DeviceInfo + DeviceClaimsInfo *DeviceClaimsInfo + ZeroSigData []byte +} + +// InfoBuffer implements the PAC Info Buffer: https://msdn.microsoft.com/en-us/library/cc237954.aspx +type InfoBuffer struct { + ULType uint32 // A 32-bit unsigned integer in little-endian format that describes the type of data present in the buffer contained at Offset. + CBBufferSize uint32 // A 32-bit unsigned integer in little-endian format that contains the size, in bytes, of the buffer in the PAC located at Offset. + Offset uint64 // A 64-bit unsigned integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the PACTYPE structure. The data offset MUST be a multiple of eight. The following sections specify the format of each type of element. +} + +// Unmarshal bytes into the PACType struct +func (pac *PACType) Unmarshal(b []byte) (err error) { + pac.Data = b + zb := make([]byte, len(b), len(b)) + copy(zb, b) + pac.ZeroSigData = zb + r := mstypes.NewReader(bytes.NewReader(b)) + pac.CBuffers, err = r.Uint32() + if err != nil { + return + } + pac.Version, err = r.Uint32() + if err != nil { + return + } + buf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers) + for i := range buf { + buf[i].ULType, err = r.Uint32() + if err != nil { + return + } + buf[i].CBBufferSize, err = r.Uint32() + if err != nil { + return + } + buf[i].Offset, err = r.Uint64() + if err != nil { + return + } + } + pac.Buffers = buf + return nil +} + +// ProcessPACInfoBuffers processes the PAC Info Buffers. 
+// https://msdn.microsoft.com/en-us/library/cc237954.aspx +func (pac *PACType) ProcessPACInfoBuffers(key types.EncryptionKey, l *log.Logger) error { + for _, buf := range pac.Buffers { + p := make([]byte, buf.CBBufferSize, buf.CBBufferSize) + copy(p, pac.Data[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)]) + switch buf.ULType { + case infoTypeKerbValidationInfo: + if pac.KerbValidationInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k KerbValidationInfo + err := k.Unmarshal(p) + if err != nil { + return fmt.Errorf("error processing KerbValidationInfo: %v", err) + } + pac.KerbValidationInfo = &k + case infoTypeCredentials: + // Currently PAC parsing is only useful on the service side in gokrb5 + // The CredentialsInfo are only useful when gokrb5 has implemented RFC4556 and only applied on the client side. + // Skipping CredentialsInfo - will be revisited under RFC4556 implementation. + continue + //if pac.CredentialsInfo != nil { + // //Must ignore subsequent buffers of this type + // continue + //} + //var k CredentialsInfo + //err := k.Unmarshal(p, key) // The encryption key used is the AS reply key only available to the client. 
+ //if err != nil { + // return fmt.Errorf("error processing CredentialsInfo: %v", err) + //} + //pac.CredentialsInfo = &k + case infoTypePACServerSignatureData: + if pac.ServerChecksum != nil { + //Must ignore subsequent buffers of this type + continue + } + var k SignatureData + zb, err := k.Unmarshal(p) + copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) + if err != nil { + return fmt.Errorf("error processing ServerChecksum: %v", err) + } + pac.ServerChecksum = &k + case infoTypePACKDCSignatureData: + if pac.KDCChecksum != nil { + //Must ignore subsequent buffers of this type + continue + } + var k SignatureData + zb, err := k.Unmarshal(p) + copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) + if err != nil { + return fmt.Errorf("error processing KDCChecksum: %v", err) + } + pac.KDCChecksum = &k + case infoTypePACClientInfo: + if pac.ClientInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k ClientInfo + err := k.Unmarshal(p) + if err != nil { + return fmt.Errorf("error processing ClientInfo: %v", err) + } + pac.ClientInfo = &k + case infoTypeS4UDelegationInfo: + if pac.S4UDelegationInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k S4UDelegationInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process S4U_DelegationInfo: %v", err) + continue + } + pac.S4UDelegationInfo = &k + case infoTypeUPNDNSInfo: + if pac.UPNDNSInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k UPNDNSInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process UPN_DNSInfo: %v", err) + continue + } + pac.UPNDNSInfo = &k + case infoTypePACClientClaimsInfo: + if pac.ClientClaimsInfo != nil || len(p) < 1 { + //Must ignore subsequent buffers of this type + continue + } + var k ClientClaimsInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process ClientClaimsInfo: %v", err) + continue 
+ } + pac.ClientClaimsInfo = &k + case infoTypePACDeviceInfo: + if pac.DeviceInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k DeviceInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process DeviceInfo: %v", err) + continue + } + pac.DeviceInfo = &k + case infoTypePACDeviceClaimsInfo: + if pac.DeviceClaimsInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k DeviceClaimsInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process DeviceClaimsInfo: %v", err) + continue + } + pac.DeviceClaimsInfo = &k + } + } + + if ok, err := pac.verify(key); !ok { + return err + } + + return nil +} + +func (pac *PACType) verify(key types.EncryptionKey) (bool, error) { + if pac.KerbValidationInfo == nil { + return false, errors.New("PAC Info Buffers does not contain a KerbValidationInfo") + } + if pac.ServerChecksum == nil { + return false, errors.New("PAC Info Buffers does not contain a ServerChecksum") + } + if pac.KDCChecksum == nil { + return false, errors.New("PAC Info Buffers does not contain a KDCChecksum") + } + if pac.ClientInfo == nil { + return false, errors.New("PAC Info Buffers does not contain a ClientInfo") + } + etype, err := crypto.GetChksumEtype(int32(pac.ServerChecksum.SignatureType)) + if err != nil { + return false, err + } + if ok := etype.VerifyChecksum(key.KeyValue, + pac.ZeroSigData, + pac.ServerChecksum.Signature, + keyusage.KERB_NON_KERB_CKSUM_SALT); !ok { + return false, errors.New("PAC service checksum verification failed") + } + + return true, nil +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go new file mode 100644 index 00000000..da837d4b --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go @@ -0,0 +1,26 @@ +package pac + +import ( + "bytes" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + 
"github.com/jcmturner/rpc/v2/ndr" +) + +// S4UDelegationInfo implements https://msdn.microsoft.com/en-us/library/cc237944.aspx +type S4UDelegationInfo struct { + S4U2proxyTarget mstypes.RPCUnicodeString // The name of the principal to whom the application can forward the ticket. + TransitedListSize uint32 + S4UTransitedServices []mstypes.RPCUnicodeString `ndr:"pointer,conformant"` // List of all services that have been delegated through by this client and subsequent services or servers.. Size is value of TransitedListSize +} + +// Unmarshal bytes into the S4UDelegationInfo struct +func (k *S4UDelegationInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling S4UDelegationInfo: %v", err) + } + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go new file mode 100644 index 00000000..8f6aa58f --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go @@ -0,0 +1,67 @@ +package pac + +import ( + "bytes" + + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/rpc/v2/mstypes" +) + +/* +https://msdn.microsoft.com/en-us/library/cc237955.aspx + +The Key Usage Value MUST be KERB_NON_KERB_CKSUM_SALT (17) [MS-KILE] (section 3.1.5.9). + +Server Signature (SignatureType = 0x00000006) +https://msdn.microsoft.com/en-us/library/cc237957.aspx + +KDC Signature (SignatureType = 0x00000007) +https://msdn.microsoft.com/en-us/library/dd357117.aspx +*/ + +// SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx +type SignatureData struct { + SignatureType uint32 // A 32-bit unsigned integer value in little-endian format that defines the cryptographic system used to calculate the checksum. 
This MUST be one of the following checksum types: KERB_CHECKSUM_HMAC_MD5 (signature size = 16), HMAC_SHA1_96_AES128 (signature size = 12), HMAC_SHA1_96_AES256 (signature size = 12). + Signature []byte // Size depends on the type. See comment above. + RODCIdentifier uint16 // A 16-bit unsigned integer value in little-endian format that contains the first 16 bits of the key version number ([MS-KILE] section 3.1.5.8) when the KDC is an RODC. When the KDC is not an RODC, this field does not exist. +} + +// Unmarshal bytes into the SignatureData struct +func (k *SignatureData) Unmarshal(b []byte) (rb []byte, err error) { + r := mstypes.NewReader(bytes.NewReader(b)) + + k.SignatureType, err = r.Uint32() + if err != nil { + return + } + + var c int + switch k.SignatureType { + case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: + c = 16 + case uint32(chksumtype.HMAC_SHA1_96_AES128): + c = 12 + case uint32(chksumtype.HMAC_SHA1_96_AES256): + c = 12 + } + k.Signature, err = r.ReadBytes(c) + if err != nil { + return + } + + // When the KDC is not an Read Only Domain Controller (RODC), this field does not exist. + if len(b) >= 4+c+2 { + k.RODCIdentifier, err = r.Uint16() + if err != nil { + return + } + } + + // Create bytes with zeroed signature needed for checksum verification + rb = make([]byte, len(b), len(b)) + copy(rb, b) + z := make([]byte, len(b), len(b)) + copy(rb[4:4+c], z) + + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go new file mode 100644 index 00000000..d40679d4 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go @@ -0,0 +1,87 @@ +package pac + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "github.com/jcmturner/rpc/v2/mstypes" + "github.com/jcmturner/rpc/v2/ndr" +) + +const ( + // NTLMSupCredLMOWF indicates that the LM OWF member is present and valid. 
+ NTLMSupCredLMOWF uint32 = 31 + // NTLMSupCredNTOWF indicates that the NT OWF member is present and valid. + NTLMSupCredNTOWF uint32 = 30 +) + +// NTLMSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237949.aspx +type NTLMSupplementalCred struct { + Version uint32 // A 32-bit unsigned integer that defines the credential version.This field MUST be 0x00000000. + Flags uint32 + LMPassword []byte // A 16-element array of unsigned 8-bit integers that define the LM OWF. The LMPassword member MUST be ignored if the L flag is not set in the Flags member. + NTPassword []byte // A 16-element array of unsigned 8-bit integers that define the NT OWF. The NTPassword member MUST be ignored if the N flag is not set in the Flags member. +} + +// Unmarshal converts the bytes provided into a NTLMSupplementalCred. +func (c *NTLMSupplementalCred) Unmarshal(b []byte) (err error) { + r := mstypes.NewReader(bytes.NewReader(b)) + c.Version, err = r.Uint32() + if err != nil { + return + } + if c.Version != 0 { + err = errors.New("NTLMSupplementalCred version is not zero") + return + } + c.Flags, err = r.Uint32() + if err != nil { + return + } + if isFlagSet(c.Flags, NTLMSupCredLMOWF) { + c.LMPassword, err = r.ReadBytes(16) + if err != nil { + return + } + } + if isFlagSet(c.Flags, NTLMSupCredNTOWF) { + c.NTPassword, err = r.ReadBytes(16) + if err != nil { + return + } + } + return +} + +// isFlagSet tests if a flag is set in the uint32 little endian flag +func isFlagSet(f uint32, i uint32) bool { + //Which byte? 
+ b := int(i / 8) + //Which bit in byte + p := uint(7 - (int(i) - 8*b)) + fb := make([]byte, 4) + binary.LittleEndian.PutUint32(fb, f) + if fb[b]&(1<: +func GetHostAddress(s string) (HostAddress, error) { + var h HostAddress + cAddr, _, err := net.SplitHostPort(s) + if err != nil { + return h, fmt.Errorf("invalid format of client address: %v", err) + } + ip := net.ParseIP(cAddr) + var ht int32 + if ip.To4() != nil { + ht = addrtype.IPv4 + ip = ip.To4() + } else if ip.To16() != nil { + ht = addrtype.IPv6 + ip = ip.To16() + } else { + return h, fmt.Errorf("could not determine client's address types: %v", err) + } + h = HostAddress{ + AddrType: ht, + Address: ip, + } + return h, nil +} + +// GetAddress returns a string representation of the HostAddress. +func (h *HostAddress) GetAddress() (string, error) { + var b []byte + _, err := asn1.Unmarshal(h.Address, &b) + return string(b), err +} + +// LocalHostAddresses returns a HostAddresses struct for the local machines interface IP addresses. 
+func LocalHostAddresses() (ha HostAddresses, err error) { + ifs, err := net.Interfaces() + if err != nil { + return + } + for _, iface := range ifs { + if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { + // Interface is either loopback of not up + continue + } + addrs, err := iface.Addrs() + if err != nil { + continue + } + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + var a HostAddress + if ip.To16() == nil { + //neither IPv4 or IPv6 + continue + } + if ip.To4() != nil { + //Is IPv4 + a.AddrType = addrtype.IPv4 + a.Address = ip.To4() + } else { + a.AddrType = addrtype.IPv6 + a.Address = ip.To16() + } + ha = append(ha, a) + } + } + return ha, nil +} + +// HostAddressesFromNetIPs returns a HostAddresses type from a slice of net.IP +func HostAddressesFromNetIPs(ips []net.IP) (ha HostAddresses) { + for _, ip := range ips { + ha = append(ha, HostAddressFromNetIP(ip)) + } + return ha +} + +// HostAddressFromNetIP returns a HostAddress type from a net.IP +func HostAddressFromNetIP(ip net.IP) HostAddress { + if ip.To4() != nil { + //Is IPv4 + return HostAddress{ + AddrType: addrtype.IPv4, + Address: ip.To4(), + } + } + return HostAddress{ + AddrType: addrtype.IPv6, + Address: ip.To16(), + } +} + +// HostAddressesEqual tests if two HostAddress slices are equal. +func HostAddressesEqual(h, a []HostAddress) bool { + if len(h) != len(a) { + return false + } + for _, e := range a { + var found bool + for _, i := range h { + if e.Equal(i) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// HostAddressesContains tests if a HostAddress is contained in a HostAddress slice. +func HostAddressesContains(h []HostAddress, a HostAddress) bool { + for _, e := range h { + if e.Equal(a) { + return true + } + } + return false +} + +// Equal tests if the HostAddress is equal to another HostAddress provided. 
+func (h *HostAddress) Equal(a HostAddress) bool { + if h.AddrType != a.AddrType { + return false + } + return bytes.Equal(h.Address, a.Address) +} + +// Contains tests if a HostAddress is contained within the HostAddresses struct. +func (h *HostAddresses) Contains(a HostAddress) bool { + for _, e := range *h { + if e.Equal(a) { + return true + } + } + return false +} + +// Equal tests if a HostAddress slice is equal to the HostAddresses struct. +func (h *HostAddresses) Equal(a []HostAddress) bool { + if len(*h) != len(a) { + return false + } + for _, e := range a { + if !h.Contains(e) { + return false + } + } + return true +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go new file mode 100644 index 00000000..0f203834 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go @@ -0,0 +1,68 @@ +package types + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.8 + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags. +func NewKrbFlags() asn1.BitString { + f := asn1.BitString{} + f.Bytes = make([]byte, 4) + f.BitLength = len(f.Bytes) * 8 + return f +} + +// SetFlags sets the flags of an ASN1 BitString. +func SetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + SetFlag(f, i) + } +} + +// SetFlag sets a flag in an ASN1 BitString. +func SetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] | (1 << p) +} + +// UnsetFlags unsets flags in an ASN1 BitString. +func UnsetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + UnsetFlag(f, i) + } +} + +// UnsetFlag unsets a flag in an ASN1 BitString. 
+func UnsetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p) +} + +// IsFlagSet tests if a flag is set in the ASN1 BitString. +func IsFlagSet(f *asn1.BitString, i int) bool { + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + if (*f).Bytes[b]&(1</@ +// a PrincipalName type will be returned with the name type set to KRB_NT_PRINCIPAL(1) +// and the realm will be returned as a string. If the "@" suffix +// is not included in the SPN then the value of realm string returned will be "" +func ParseSPNString(spn string) (pn PrincipalName, realm string) { + if strings.Contains(spn, "@") { + s := strings.Split(spn, "@") + realm = s[len(s)-1] + spn = strings.TrimSuffix(spn, "@"+realm) + } + pn = NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) + return +} diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go new file mode 100644 index 00000000..19e9f496 --- /dev/null +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go @@ -0,0 +1,18 @@ +package types + +import "github.com/jcmturner/gofork/encoding/asn1" + +// TypedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 +type TypedData struct { + DataType int32 `asn1:"explicit,tag:0"` + DataValue []byte `asn1:"optional,explicit,tag:1"` +} + +// TypedDataSequence implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 +type TypedDataSequence []TypedData + +// Unmarshal bytes into the TypedDataSequence. 
+func (a *TypedDataSequence) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} diff --git a/vendor/github.com/jcmturner/rpc/v2/LICENSE b/vendor/github.com/jcmturner/rpc/v2/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go new file mode 100644 index 00000000..b9f535f5 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go @@ -0,0 +1,152 @@ +package mstypes + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + + "github.com/jcmturner/rpc/v2/ndr" + "golang.org/x/net/http2/hpack" +) + +// Compression format assigned numbers. https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-xca/a8b7cb0a-92a6-4187-a23b-5e14273b96f8 +const ( + CompressionFormatNone uint16 = 0 + CompressionFormatLZNT1 uint16 = 2 // LZNT1 aka ntfs compression + CompressionFormatXPress uint16 = 3 // plain LZ77 + CompressionFormatXPressHuff uint16 = 4 // LZ77+Huffman - The Huffman variant of the XPRESS compression format uses LZ77-style dictionary compression combined with Huffman coding. 
+) + +// ClaimsSourceTypeAD https://msdn.microsoft.com/en-us/library/hh553809.aspx +const ClaimsSourceTypeAD uint16 = 1 + +// Claim Type assigned numbers +const ( + ClaimTypeIDInt64 uint16 = 1 + ClaimTypeIDUInt64 uint16 = 2 + ClaimTypeIDString uint16 = 3 + ClaimsTypeIDBoolean uint16 = 6 +) + +// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx +type ClaimsBlob struct { + Size uint32 + EncodedBlob EncodedBlob +} + +// EncodedBlob are the bytes of the encoded Claims +type EncodedBlob []byte + +// Size returns the size of the bytes of the encoded Claims +func (b EncodedBlob) Size(c interface{}) int { + cb := c.(ClaimsBlob) + return int(cb.Size) +} + +// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx +type ClaimsSetMetadata struct { + ClaimsSetSize uint32 + ClaimsSetBytes []byte `ndr:"pointer,conformant"` + CompressionFormat uint16 // Enum see constants for options + UncompressedClaimsSetSize uint32 + ReservedType uint16 + ReservedFieldSize uint32 + ReservedField []byte `ndr:"pointer,conformant"` +} + +// ClaimsSet reads the ClaimsSet type from the NDR encoded ClaimsSetBytes in the ClaimsSetMetadata +func (m *ClaimsSetMetadata) ClaimsSet() (c ClaimsSet, err error) { + if len(m.ClaimsSetBytes) < 1 { + err = errors.New("no bytes available for ClaimsSet") + return + } + // TODO switch statement to decompress ClaimsSetBytes + switch m.CompressionFormat { + case CompressionFormatLZNT1: + s := hex.EncodeToString(m.ClaimsSetBytes) + err = fmt.Errorf("ClaimsSet compressed, format LZNT1 not currently supported: %s", s) + return + case CompressionFormatXPress: + s := hex.EncodeToString(m.ClaimsSetBytes) + err = fmt.Errorf("ClaimsSet compressed, format XPress not currently supported: %s", s) + return + case CompressionFormatXPressHuff: + var b []byte + buff := bytes.NewBuffer(b) + _, e := hpack.HuffmanDecode(buff, m.ClaimsSetBytes) + if e != nil { + err = fmt.Errorf("error deflating: %v", e) + return + } + 
m.ClaimsSetBytes = buff.Bytes() + } + dec := ndr.NewDecoder(bytes.NewReader(m.ClaimsSetBytes)) + err = dec.Decode(&c) + return +} + +// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx +type ClaimsSet struct { + ClaimsArrayCount uint32 + ClaimsArrays []ClaimsArray `ndr:"pointer,conformant"` + ReservedType uint16 + ReservedFieldSize uint32 + ReservedField []byte `ndr:"pointer,conformant"` +} + +// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx +type ClaimsArray struct { + ClaimsSourceType uint16 + ClaimsCount uint32 + ClaimEntries []ClaimEntry `ndr:"pointer,conformant"` +} + +// ClaimEntry is a NDR union that implements https://msdn.microsoft.com/en-us/library/hh536374.aspx +type ClaimEntry struct { + ID string `ndr:"pointer,conformant,varying"` + Type uint16 `ndr:"unionTag"` + TypeInt64 ClaimTypeInt64 `ndr:"unionField"` + TypeUInt64 ClaimTypeUInt64 `ndr:"unionField"` + TypeString ClaimTypeString `ndr:"unionField"` + TypeBool ClaimTypeBoolean `ndr:"unionField"` +} + +// SwitchFunc is the ClaimEntry union field selection function +func (u ClaimEntry) SwitchFunc(_ interface{}) string { + switch u.Type { + case ClaimTypeIDInt64: + return "TypeInt64" + case ClaimTypeIDUInt64: + return "TypeUInt64" + case ClaimTypeIDString: + return "TypeString" + case ClaimsTypeIDBoolean: + return "TypeBool" + } + return "" +} + +// ClaimTypeInt64 is a claim of type int64 +type ClaimTypeInt64 struct { + ValueCount uint32 + Value []int64 `ndr:"pointer,conformant"` +} + +// ClaimTypeUInt64 is a claim of type uint64 +type ClaimTypeUInt64 struct { + ValueCount uint32 + Value []uint64 `ndr:"pointer,conformant"` +} + +// ClaimTypeString is a claim of type string +type ClaimTypeString struct { + ValueCount uint32 + Value []LPWSTR `ndr:"pointer,conformant"` +} + +// ClaimTypeBoolean is a claim of type bool +type ClaimTypeBoolean struct { + ValueCount uint32 + Value []bool `ndr:"pointer,conformant"` +} diff --git 
a/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go new file mode 100644 index 00000000..fb6510d1 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go @@ -0,0 +1,12 @@ +// Package mstypes provides implemnations of some Microsoft data types [MS-DTYP] https://msdn.microsoft.com/en-us/library/cc230283.aspx +package mstypes + +// LPWSTR implements https://msdn.microsoft.com/en-us/library/cc230355.aspx +type LPWSTR struct { + Value string `ndr:"pointer,conformant,varying"` +} + +// String returns the string representation of LPWSTR data type. +func (s *LPWSTR) String() string { + return s.Value +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go new file mode 100644 index 00000000..5cc952fa --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go @@ -0,0 +1,52 @@ +// Package mstypes implements representations of Microsoft types +package mstypes + +import ( + "time" +) + +/* +FILETIME is a windows data structure. +Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx +It contains two parts that are 32bit integers: + dwLowDateTime + dwHighDateTime +We need to combine these two into one 64bit integer. +This gives the number of 100 nano second period from January 1, 1601, Coordinated Universal Time (UTC) +*/ + +const unixEpochDiff = 116444736000000000 + +// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx +type FileTime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Time return a golang Time type from the FileTime +func (ft FileTime) Time() time.Time { + ns := (ft.MSEpoch() - unixEpochDiff) * 100 + return time.Unix(0, int64(ns)).UTC() +} + +// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100 nano second periods elapsed from January 1, 1601 UTC. 
+func (ft FileTime) MSEpoch() int64 { + return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime) +} + +// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC. +func (ft FileTime) Unix() int64 { + return (ft.MSEpoch() - unixEpochDiff) / 10000000 +} + +// GetFileTime returns a FileTime type from the provided Golang Time type. +func GetFileTime(t time.Time) FileTime { + ns := t.UnixNano() + fp := (ns / 100) + unixEpochDiff + hd := fp >> 32 + ld := fp - (hd << 32) + return FileTime{ + LowDateTime: uint32(ld), + HighDateTime: uint32(hd), + } +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go new file mode 100644 index 00000000..79151378 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go @@ -0,0 +1,19 @@ +package mstypes + +// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx +// RelativeID : A 32-bit unsigned integer that contains the RID of a particular group. +// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES +type GroupMembership struct { + RelativeID uint32 + Attributes uint32 +} + +// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx +// DomainId: A SID structure that contains the SID for the domain.This member is used in conjunction with the GroupIds members to create group SIDs for the device. +// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs. +// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount. 
+type DomainGroupMembership struct { + DomainID RPCSID `ndr:"pointer"` + GroupCount uint32 + GroupIDs []GroupMembership `ndr:"pointer,conformant"` // Size is value of GroupCount +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go new file mode 100644 index 00000000..61ac39bb --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go @@ -0,0 +1,23 @@ +package mstypes + +// Attributes of a security group membership and can be combined by using the bitwise OR operation. +// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision. +const ( + SEGroupMandatory = 31 + SEGroupEnabledByDefault = 30 + SEGroupEnabled = 29 + SEGroupOwner = 28 + SEGroupResource = 2 + //All other bits MUST be set to zero and MUST be ignored on receipt. +) + +// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx +type KerbSidAndAttributes struct { + SID RPCSID `ndr:"pointer"` // A pointer to an RPC_SID structure. + Attributes uint32 +} + +// SetFlag sets a flag in a uint32 attribute value. 
+func SetFlag(a *uint32, i uint) { + *a = *a | (1 << (31 - i)) +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go new file mode 100644 index 00000000..24495bca --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go @@ -0,0 +1,109 @@ +package mstypes + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" +) + +// Byte sizes of primitive types +const ( + SizeBool = 1 + SizeChar = 1 + SizeUint8 = 1 + SizeUint16 = 2 + SizeUint32 = 4 + SizeUint64 = 8 + SizeEnum = 2 + SizeSingle = 4 + SizeDouble = 8 + SizePtr = 4 +) + +// Reader reads simple byte stream data into a Go representations +type Reader struct { + r *bufio.Reader // source of the data +} + +// NewReader creates a new instance of a simple Reader. +func NewReader(r io.Reader) *Reader { + reader := new(Reader) + reader.r = bufio.NewReader(r) + return reader +} + +func (r *Reader) Read(p []byte) (n int, err error) { + return r.r.Read(p) +} + +func (r *Reader) Uint8() (uint8, error) { + b, err := r.r.ReadByte() + if err != nil { + return uint8(0), err + } + return uint8(b), nil +} + +func (r *Reader) Uint16() (uint16, error) { + b, err := r.ReadBytes(SizeUint16) + if err != nil { + return uint16(0), err + } + return binary.LittleEndian.Uint16(b), nil +} + +func (r *Reader) Uint32() (uint32, error) { + b, err := r.ReadBytes(SizeUint32) + if err != nil { + return uint32(0), err + } + return binary.LittleEndian.Uint32(b), nil +} + +func (r *Reader) Uint64() (uint64, error) { + b, err := r.ReadBytes(SizeUint64) + if err != nil { + return uint64(0), err + } + return binary.LittleEndian.Uint64(b), nil +} + +func (r *Reader) FileTime() (f FileTime, err error) { + f.LowDateTime, err = r.Uint32() + if err != nil { + return + } + f.HighDateTime, err = r.Uint32() + if err != nil { + return + } + return +} + +// UTF16String returns a string that is UTF16 encoded in a byte slice. 
n is the number of bytes representing the string +func (r *Reader) UTF16String(n int) (str string, err error) { + //Length divided by 2 as each run is 16bits = 2bytes + s := make([]rune, n/2, n/2) + for i := 0; i < len(s); i++ { + var u uint16 + u, err = r.Uint16() + if err != nil { + return + } + s[i] = rune(u) + } + str = string(s) + return +} + +// readBytes returns a number of bytes from the NDR byte stream. +func (r *Reader) ReadBytes(n int) ([]byte, error) { + //TODO make this take an int64 as input to allow for larger values on all systems? + b := make([]byte, n, n) + m, err := r.r.Read(b) + if err != nil || m != n { + return b, fmt.Errorf("error reading bytes from stream: %v", err) + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go new file mode 100644 index 00000000..4bf02e0e --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go @@ -0,0 +1,13 @@ +package mstypes + +// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx +type RPCUnicodeString struct { + Length uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character. + MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length. 
+ Value string `ndr:"pointer,conformant,varying"` +} + +// String returns the RPCUnicodeString string value +func (r *RPCUnicodeString) String() string { + return r.Value +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go new file mode 100644 index 00000000..8e347058 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go @@ -0,0 +1,36 @@ +package mstypes + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "math" + "strings" +) + +// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx +type RPCSID struct { + Revision uint8 // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. + SubAuthorityCount uint8 // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. + IdentifierAuthority [6]byte // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. + SubAuthority []uint32 `ndr:"conformant"` // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. +} + +// String returns the string representation of the RPC_SID. +func (s *RPCSID) String() string { + var strb strings.Builder + strb.WriteString("S-1-") + + b := append(make([]byte, 2, 2), s.IdentifierAuthority[:]...) 
+ // For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx + i := binary.BigEndian.Uint64(b) + if i > math.MaxUint32 { + fmt.Fprintf(&strb, "0x%s", hex.EncodeToString(s.IdentifierAuthority[:])) + } else { + fmt.Fprintf(&strb, "%d", i) + } + for _, sub := range s.SubAuthority { + fmt.Fprintf(&strb, "-%d", sub) + } + return strb.String() +} diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go new file mode 100644 index 00000000..fcf0a5d9 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go @@ -0,0 +1,11 @@ +package mstypes + +// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx +type CypherBlock struct { + Data [8]byte // size = 8 +} + +// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx +type UserSessionKey struct { + CypherBlock [2]CypherBlock // size = 2 +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go new file mode 100644 index 00000000..5e2def2a --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go @@ -0,0 +1,413 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// intFromTag returns an int that is a value in a struct tag key/value pair +func intFromTag(tag reflect.StructTag, key string) (int, error) { + ndrTag := parseTags(tag) + d := 1 + if n, ok := ndrTag.Map[key]; ok { + i, err := strconv.Atoi(n) + if err != nil { + return d, fmt.Errorf("invalid dimensions tag [%s]: %v", n, err) + } + d = i + } + return d, nil +} + +// parseDimensions returns the a slice of the size of each dimension and type of the member at the deepest level. 
+func parseDimensions(v reflect.Value) (l []int, tb reflect.Type) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Array && t.Kind() != reflect.Slice { + return + } + l = append(l, v.Len()) + if t.Elem().Kind() == reflect.Array || t.Elem().Kind() == reflect.Slice { + // contains array or slice + var m []int + m, tb = parseDimensions(v.Index(0)) + l = append(l, m...) + } else { + tb = t.Elem() + } + return +} + +// sliceDimensions returns the count of dimensions a slice has. +func sliceDimensions(t reflect.Type) (d int, tb reflect.Type) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Slice { + d++ + var n int + n, tb = sliceDimensions(t.Elem()) + d += n + } else { + tb = t + } + return +} + +// makeSubSlices is a deep recursive creation/initialisation of multi-dimensional slices. +// Takes the reflect.Value of the 1st dimension and a slice of the lengths of the sub dimensions +func makeSubSlices(v reflect.Value, l []int) { + ty := v.Type().Elem() + if ty.Kind() != reflect.Slice { + return + } + for i := 0; i < v.Len(); i++ { + s := reflect.MakeSlice(ty, l[0], l[0]) + v.Index(i).Set(s) + // Are there more sub dimensions? + if len(l) > 1 { + makeSubSlices(v.Index(i), l[1:]) + } + } + return +} + +// multiDimensionalIndexPermutations returns all the permutations of the indexes of a multi-dimensional slice. 
+// The input is a slice of integers that indicates the max size/length of each dimension +func multiDimensionalIndexPermutations(l []int) (ps [][]int) { + z := make([]int, len(l), len(l)) // The zeros permutation + ps = append(ps, z) + // for each dimension, in reverse + for i := len(l) - 1; i >= 0; i-- { + ws := make([][]int, len(ps)) + copy(ws, ps) + //create a permutation for each of the iterations of the current dimension + for j := 1; j <= l[i]-1; j++ { + // For each existing permutation + for _, p := range ws { + np := make([]int, len(p), len(p)) + copy(np, p) + np[i] = j + ps = append(ps, np) + } + } + } + return +} + +// precedingMax reads off the next conformant max value +func (dec *Decoder) precedingMax() uint32 { + m := dec.conformantMax[0] + dec.conformantMax = dec.conformantMax[1:] + return m +} + +// fillFixedArray establishes if the fixed array is uni or multi dimensional and then fills it. +func (dec *Decoder) fillFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + l, t := parseDimensions(v) + if t.Kind() == reflect.String { + tag = reflect.StructTag(subStringArrayTag) + } + if len(l) < 1 { + return errors.New("could not establish dimensions of fixed array") + } + if len(l) == 1 { + err := dec.fillUniDimensionalFixedArray(v, tag, def) + if err != nil { + return fmt.Errorf("could not fill uni-dimensional fixed array: %v", err) + } + return nil + } + // Fixed array is multidimensional + ps := multiDimensionalIndexPermutations(l[:len(l)-1]) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + for _, i := range p { + a = a.Index(i) + } + // fill with the last dimension array + err := dec.fillUniDimensionalFixedArray(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill dimension %v of multi-dimensional fixed array: %v", p, err) + } + } + return nil +} + +// readUniDimensionalFixedArray reads an array (not slice) from the byte stream. 
+func (dec *Decoder) fillUniDimensionalFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + for i := 0; i < v.Len(); i++ { + err := dec.fill(v.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of fixed array: %v", i, err) + } + } + return nil +} + +// fillConformantArray establishes if the conformant array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, _ := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalConformantArray(v, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalConformantArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalConformantArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + m := dec.precedingMax() + n := int(m) + a := reflect.MakeSlice(v.Type(), n, n) + for i := 0; i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional conformant array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalConformantArray fills the multi-dimensional slice value provided from conformant array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. 
+func (dec *Decoder) fillMultiDimensionalConformantArray(v reflect.Value, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the max size of each dimensions from the ndr stream + l := make([]int, d, d) + for i := range l { + l[i] = int(dec.precedingMax()) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, l[0], l[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, l[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(l) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + for _, i := range p { + a = a.Index(i) + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} + +// fillVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, t := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalVaryingArray(v, t, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalVaryingArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalVaryingArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + o, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of uni-dimensional varying array: %v", err) + } + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not establish actual count of uni-dimensional varying array: %v", err) + } + t := v.Type() + // Total size of the array is the offset in the index being passed plus the actual count of elements being passed. 
+ n := int(s + o) + a := reflect.MakeSlice(t, n, n) + // Populate the array starting at the offset specified + for i := int(o); i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional varying array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalVaryingArray fills the multi-dimensional slice value provided from varying array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the offset and actual count of each dimensions from the ndr stream + o := make([]int, d, d) + l := make([]int, d, d) + for i := range l { + off, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err) + } + o[i] = int(off) + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read size of dimension %d: %v", i+1, err) + } + l[i] = int(s) + int(off) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, l[0], l[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, l[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(l) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + var os bool // should this permutation be skipped due to the offset of any of the dimensions? + for i, j := range p { + if j < o[i] { + os = true + break + } + a = a.Index(j) + } + if os { + // This permutation should be skipped as it is less than the offset for one of the dimensions. 
+ continue + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} + +// fillConformantVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, t := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalConformantVaryingArray(v, t, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalConformantVaryingArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalConformantVaryingArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + m := dec.precedingMax() + o, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of uni-dimensional conformant varying array: %v", err) + } + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not establish actual count of uni-dimensional conformant varying array: %v", err) + } + if m < o+s { + return errors.New("max count is less than the offset plus actual count") + } + t := v.Type() + n := int(s) + a := reflect.MakeSlice(t, n, n) + for i := int(o); i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional conformant varying array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalConformantVaryingArray fills the multi-dimensional slice value provided from conformant varying array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. 
+func (dec *Decoder) fillMultiDimensionalConformantVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the offset and actual count of each dimensions from the ndr stream + m := make([]int, d, d) + for i := range m { + m[i] = int(dec.precedingMax()) + } + o := make([]int, d, d) + l := make([]int, d, d) + for i := range l { + off, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err) + } + o[i] = int(off) + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read actual count of dimension %d: %v", i+1, err) + } + if m[i] < int(s)+int(off) { + m[i] = int(s) + int(off) + } + l[i] = int(s) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, m[0], m[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, m[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(m) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + var os bool // should this permutation be skipped due to the offset of any of the dimensions or max is higher than the actual count being passed + for i, j := range p { + if j < o[i] || j >= l[i] { + os = true + break + } + a = a.Index(j) + } + if os { + // This permutation should be skipped as it is less than the offset for one of the dimensions. 
+ continue + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go new file mode 100644 index 00000000..6157b4ef --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go @@ -0,0 +1,393 @@ +// Package ndr provides the ability to unmarshal NDR encoded byte steams into Go data structures +package ndr + +import ( + "bufio" + "fmt" + "io" + "reflect" + "strings" +) + +// Struct tag values +const ( + TagConformant = "conformant" + TagVarying = "varying" + TagPointer = "pointer" + TagPipe = "pipe" +) + +// Decoder unmarshals NDR byte stream data into a Go struct representation +type Decoder struct { + r *bufio.Reader // source of the data + size int // initial size of bytes in buffer + ch CommonHeader // NDR common header + ph PrivateHeader // NDR private header + conformantMax []uint32 // conformant max values that were moved to the beginning of the structure + s interface{} // pointer to the structure being populated + current []string // keeps track of the current field being populated +} + +type deferedPtr struct { + v reflect.Value + tag reflect.StructTag +} + +// NewDecoder creates a new instance of a NDR Decoder. +func NewDecoder(r io.Reader) *Decoder { + dec := new(Decoder) + dec.r = bufio.NewReader(r) + dec.r.Peek(int(commonHeaderBytes)) // For some reason an operation is needed on the buffer to initialise it so Buffered() != 0 + dec.size = dec.r.Buffered() + return dec +} + +// Decode unmarshals the NDR encoded bytes into the pointer of a struct provided. +func (dec *Decoder) Decode(s interface{}) error { + dec.s = s + err := dec.readCommonHeader() + if err != nil { + return err + } + err = dec.readPrivateHeader() + if err != nil { + return err + } + _, err = dec.r.Discard(4) //The next 4 bytes are an RPC unique pointer referent. We just skip these. 
+ if err != nil { + return Errorf("unable to process byte stream: %v", err) + } + + return dec.process(s, reflect.StructTag("")) +} + +func (dec *Decoder) process(s interface{}, tag reflect.StructTag) error { + // Scan for conformant fields as their max counts are moved to the beginning + // http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_37 + err := dec.scanConformantArrays(s, tag) + if err != nil { + return err + } + // Recursively fill the struct fields + var localDef []deferedPtr + err = dec.fill(s, tag, &localDef) + if err != nil { + return Errorf("could not decode: %v", err) + } + // Read any deferred referents associated with pointers + for _, p := range localDef { + err = dec.process(p.v, p.tag) + if err != nil { + return fmt.Errorf("could not decode deferred referent: %v", err) + } + } + return nil +} + +// scanConformantArrays scans the structure for embedded conformant fields and captures the maximum element counts for +// dimensions of the array that are moved to the beginning of the structure. +func (dec *Decoder) scanConformantArrays(s interface{}, tag reflect.StructTag) error { + err := dec.conformantScan(s, tag) + if err != nil { + return fmt.Errorf("failed to scan for embedded conformant arrays: %v", err) + } + for i := range dec.conformantMax { + dec.conformantMax[i], err = dec.readUint32() + if err != nil { + return fmt.Errorf("could not read preceding conformant max count index %d: %v", i, err) + } + } + return nil +} + +// conformantScan inspects the structure's fields for whether they are conformant. 
+func (dec *Decoder) conformantScan(s interface{}, tag reflect.StructTag) error { + ndrTag := parseTags(tag) + if ndrTag.HasValue(TagPointer) { + return nil + } + v := getReflectValue(s) + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + err := dec.conformantScan(v.Field(i), v.Type().Field(i).Tag) + if err != nil { + return err + } + } + case reflect.String: + if !ndrTag.HasValue(TagConformant) { + break + } + dec.conformantMax = append(dec.conformantMax, uint32(0)) + case reflect.Slice: + if !ndrTag.HasValue(TagConformant) { + break + } + d, t := sliceDimensions(v.Type()) + for i := 0; i < d; i++ { + dec.conformantMax = append(dec.conformantMax, uint32(0)) + } + // For string arrays there is a common max for the strings within the array. + if t.Kind() == reflect.String { + dec.conformantMax = append(dec.conformantMax, uint32(0)) + } + } + return nil +} + +func (dec *Decoder) isPointer(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) (bool, error) { + // Pointer so defer filling the referent + ndrTag := parseTags(tag) + if ndrTag.HasValue(TagPointer) { + p, err := dec.readUint32() + if err != nil { + return true, fmt.Errorf("could not read pointer: %v", err) + } + ndrTag.delete(TagPointer) + if p != 0 { + // if pointer is not zero add to the deferred items at end of stream + *def = append(*def, deferedPtr{v, ndrTag.StructTag()}) + } + return true, nil + } + return false, nil +} + +func getReflectValue(s interface{}) (v reflect.Value) { + if r, ok := s.(reflect.Value); ok { + v = r + } else { + if reflect.ValueOf(s).Kind() == reflect.Ptr { + v = reflect.ValueOf(s).Elem() + } + } + return +} + +// fill populates fields with values from the NDR byte stream. 
+func (dec *Decoder) fill(s interface{}, tag reflect.StructTag, localDef *[]deferedPtr) error { + v := getReflectValue(s) + + //// Pointer so defer filling the referent + ptr, err := dec.isPointer(v, tag, localDef) + if err != nil { + return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + if ptr { + return nil + } + + // Populate the value from the byte stream + switch v.Kind() { + case reflect.Struct: + dec.current = append(dec.current, v.Type().Name()) //Track the current field being filled + // in case struct is a union, track this and the selected union field for efficiency + var unionTag reflect.Value + var unionField string // field to fill if struct is a union + // Go through each field in the struct and recursively fill + for i := 0; i < v.NumField(); i++ { + fieldName := v.Type().Field(i).Name + dec.current = append(dec.current, fieldName) //Track the current field being filled + //fmt.Fprintf(os.Stderr, "DEBUG Decoding: %s\n", strings.Join(dec.current, "/")) + structTag := v.Type().Field(i).Tag + ndrTag := parseTags(structTag) + + // Union handling + if !unionTag.IsValid() { + // Is this field a union tag? + unionTag = dec.isUnion(v.Field(i), structTag) + } else { + // What is the selected field value of the union if we don't already know + if unionField == "" { + unionField, err = unionSelectedField(v, unionTag) + if err != nil { + return fmt.Errorf("could not determine selected union value field for %s with discriminat"+ + " tag %s: %v", v.Type().Name(), unionTag, err) + } + } + if ndrTag.HasValue(TagUnionField) && fieldName != unionField { + // is a union and this field has not been selected so will skip it. 
+ dec.current = dec.current[:len(dec.current)-1] //This field has been skipped so remove it from the current field tracker + continue + } + } + + // Check if field is a pointer + if v.Field(i).Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && + v.Field(i).Type().Kind() == reflect.Slice && v.Field(i).Type().Elem().Kind() == reflect.Uint8 { + //field is for rawbytes + structTag, err = addSizeToTag(v, v.Field(i), structTag) + if err != nil { + return fmt.Errorf("could not get rawbytes field(%s) size: %v", strings.Join(dec.current, "/"), err) + } + ptr, err := dec.isPointer(v.Field(i), structTag, localDef) + if err != nil { + return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + if !ptr { + err := dec.readRawBytes(v.Field(i), structTag) + if err != nil { + return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + } + } else { + err := dec.fill(v.Field(i), structTag, localDef) + if err != nil { + return fmt.Errorf("could not fill struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + } + dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker + } + dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker + case reflect.Bool: + i, err := dec.readBool() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint8: + i, err := dec.readUint8() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint16: + i, err := dec.readUint16() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint32: + i, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), 
err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint64: + i, err := dec.readUint64() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int8: + i, err := dec.readInt8() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int16: + i, err := dec.readInt16() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int32: + i, err := dec.readInt32() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int64: + i, err := dec.readInt64() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.String: + ndrTag := parseTags(tag) + conformant := ndrTag.HasValue(TagConformant) + // strings are always varying so this is assumed without an explicit tag + var s string + var err error + if conformant { + s, err = dec.readConformantVaryingString(localDef) + if err != nil { + return fmt.Errorf("could not fill with conformant varying string: %v", err) + } + } else { + s, err = dec.readVaryingString(localDef) + if err != nil { + return fmt.Errorf("could not fill with varying string: %v", err) + } + } + v.Set(reflect.ValueOf(s)) + case reflect.Float32: + i, err := dec.readFloat32() + if err != nil { + return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Float64: + i, err := dec.readFloat64() + if err != nil { + return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Array: + err := dec.fillFixedArray(v, tag, localDef) + if err != nil { + return err + } + case reflect.Slice: + if v.Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && v.Type().Elem().Kind() == reflect.Uint8 { + 
//field is for rawbytes + err := dec.readRawBytes(v, tag) + if err != nil { + return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + break + } + ndrTag := parseTags(tag) + conformant := ndrTag.HasValue(TagConformant) + varying := ndrTag.HasValue(TagVarying) + if ndrTag.HasValue(TagPipe) { + err := dec.fillPipe(v, tag) + if err != nil { + return err + } + break + } + _, t := sliceDimensions(v.Type()) + if t.Kind() == reflect.String && !ndrTag.HasValue(subStringArrayValue) { + // String array + err := dec.readStringsArray(v, tag, localDef) + if err != nil { + return err + } + break + } + // varying is assumed as fixed arrays use the Go array type rather than slice + if conformant && varying { + err := dec.fillConformantVaryingArray(v, tag, localDef) + if err != nil { + return err + } + } else if !conformant && varying { + err := dec.fillVaryingArray(v, tag, localDef) + if err != nil { + return err + } + } else { + //default to conformant and not varying + err := dec.fillConformantArray(v, tag, localDef) + if err != nil { + return err + } + } + default: + return fmt.Errorf("unsupported type") + } + return nil +} + +// readBytes returns a number of bytes from the NDR byte stream. +func (dec *Decoder) readBytes(n int) ([]byte, error) { + //TODO make this take an int64 as input to allow for larger values on all systems? + b := make([]byte, n, n) + m, err := dec.r.Read(b) + if err != nil || m != n { + return b, fmt.Errorf("error reading bytes from stream: %v", err) + } + return b, nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/error.go b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go new file mode 100644 index 00000000..9971194d --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go @@ -0,0 +1,18 @@ +package ndr + +import "fmt" + +// Malformed implements the error interface for malformed NDR encoding errors. 
+type Malformed struct { + EText string +} + +// Error implements the error interface on the Malformed struct. +func (e Malformed) Error() string { + return fmt.Sprintf("malformed NDR stream: %s", e.EText) +} + +// Errorf formats an error message into a malformed NDR error. +func Errorf(format string, a ...interface{}) Malformed { + return Malformed{EText: fmt.Sprintf(format, a...)} +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/header.go b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go new file mode 100644 index 00000000..1970ddb6 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go @@ -0,0 +1,116 @@ +package ndr + +import ( + "encoding/binary" + "fmt" +) + +/* +Serialization Version 1 +https://msdn.microsoft.com/en-us/library/cc243563.aspx + +Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx +8 bytes in total: +- First byte - Version: Must equal 1 +- Second byte - 1st 4 bits: Endianess (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC) +- 3rd - Floating point representation (This does not seem to be the case in examples for Microsoft test sources) +- 4th - Common Header Length: Must equal 8 +- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling. + +Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx +8 bytes in total: +- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself. +- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling. 
+*/ + +const ( + protocolVersion uint8 = 1 + commonHeaderBytes uint16 = 8 + bigEndian = 0 + littleEndian = 1 + ascii uint8 = 0 + ebcdic uint8 = 1 + ieee uint8 = 0 + vax uint8 = 1 + cray uint8 = 2 + ibm uint8 = 3 +) + +// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx +type CommonHeader struct { + Version uint8 + Endianness binary.ByteOrder + CharacterEncoding uint8 + FloatRepresentation uint8 + HeaderLength uint16 + Filler []byte +} + +// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx +type PrivateHeader struct { + ObjectBufferLength uint32 + Filler []byte +} + +func (dec *Decoder) readCommonHeader() error { + // Version + vb, err := dec.r.ReadByte() + if err != nil { + return Malformed{EText: "could not read first byte of common header for version"} + } + dec.ch.Version = uint8(vb) + if dec.ch.Version != protocolVersion { + return Malformed{EText: fmt.Sprintf("byte stream does not indicate a RPC Type serialization of version %v", protocolVersion)} + } + // Read Endianness & Character Encoding + eb, err := dec.r.ReadByte() + if err != nil { + return Malformed{EText: "could not read second byte of common header for endianness"} + } + endian := int(eb >> 4 & 0xF) + if endian != 0 && endian != 1 { + return Malformed{EText: "common header does not indicate a valid endianness"} + } + dec.ch.CharacterEncoding = uint8(vb & 0xF) + if dec.ch.CharacterEncoding != 0 && dec.ch.CharacterEncoding != 1 { + return Malformed{EText: "common header does not indicate a valid character encoding"} + } + switch endian { + case littleEndian: + dec.ch.Endianness = binary.LittleEndian + case bigEndian: + dec.ch.Endianness = binary.BigEndian + } + // Common header length + lb, err := dec.readBytes(2) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read common header length: %v", err)} + } + dec.ch.HeaderLength = dec.ch.Endianness.Uint16(lb) + if 
dec.ch.HeaderLength != commonHeaderBytes { + return Malformed{EText: "common header does not indicate a valid length"} + } + // Filler bytes + dec.ch.Filler, err = dec.readBytes(4) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read common header filler: %v", err)} + } + return nil +} + +func (dec *Decoder) readPrivateHeader() error { + // The next 8 bytes after the common header comprise the RPC type marshalling private header for constructed types. + err := binary.Read(dec.r, dec.ch.Endianness, &dec.ph.ObjectBufferLength) + if err != nil { + return Malformed{EText: "could not read private header object buffer length"} + } + if dec.ph.ObjectBufferLength%8 != 0 { + return Malformed{EText: "object buffer length not a multiple of 8"} + } + // Filler bytes + dec.ph.Filler, err = dec.readBytes(4) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read private header filler: %v", err)} + } + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go new file mode 100644 index 00000000..5fd27da0 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go @@ -0,0 +1,31 @@ +package ndr + +import ( + "fmt" + "reflect" +) + +func (dec *Decoder) fillPipe(v reflect.Value, tag reflect.StructTag) error { + s, err := dec.readUint32() // read element count of first chunk + if err != nil { + return err + } + a := reflect.MakeSlice(v.Type(), 0, 0) + c := reflect.MakeSlice(v.Type(), int(s), int(s)) + for s != 0 { + for i := 0; i < int(s); i++ { + err := dec.fill(c.Index(i), tag, &[]deferedPtr{}) + if err != nil { + return fmt.Errorf("could not fill element %d of pipe: %v", i, err) + } + } + s, err = dec.readUint32() // read element count of first chunk + if err != nil { + return err + } + a = reflect.AppendSlice(a, c) + c = reflect.MakeSlice(v.Type(), int(s), int(s)) + } + v.Set(a) + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go 
b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go new file mode 100644 index 00000000..7eb1d1af --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go @@ -0,0 +1,211 @@ +package ndr + +import ( + "bytes" + "encoding/binary" + "math" +) + +// Byte sizes of primitive types +const ( + SizeBool = 1 + SizeChar = 1 + SizeUint8 = 1 + SizeUint16 = 2 + SizeUint32 = 4 + SizeUint64 = 8 + SizeEnum = 2 + SizeSingle = 4 + SizeDouble = 8 + SizePtr = 4 +) + +// Bool is an NDR Boolean which is a logical quantity that assumes one of two values: TRUE or FALSE. +// NDR represents a Boolean as one octet. +// It represents a value of FALSE as a zero octet, an octet in which every bit is reset. +// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set. + +// Char is an NDR character. +// NDR represents a character as one octet. +// Characters have two representation formats: ASCII and EBCDIC. + +// USmall is an unsigned 8 bit integer + +// UShort is an unsigned 16 bit integer + +// ULong is an unsigned 32 bit integer + +// UHyper is an unsigned 64 bit integer + +// Small is an signed 8 bit integer + +// Short is an signed 16 bit integer + +// Long is an signed 32 bit integer + +// Hyper is an signed 64 bit integer + +// Enum is the NDR representation of enumerated types as signed short integers (2 octets) + +// Single is an NDR defined single-precision floating-point data type + +// Double is an NDR defined double-precision floating-point data type + +// readBool reads a byte representing a boolean. +// NDR represents a Boolean as one octet. +// It represents a value of FALSE as a zero octet, an octet in which every bit is reset. +// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set. 
+func (dec *Decoder) readBool() (bool, error) { + i, err := dec.readUint8() + if err != nil { + return false, err + } + if i != 0 { + return true, nil + } + return false, nil +} + +// readChar reads bytes representing a 8bit ASCII integer cast to a rune. +func (dec *Decoder) readChar() (rune, error) { + var r rune + a, err := dec.readUint8() + if err != nil { + return r, err + } + return rune(a), nil +} + +// readUint8 reads bytes representing a 8bit unsigned integer. +func (dec *Decoder) readUint8() (uint8, error) { + b, err := dec.r.ReadByte() + if err != nil { + return uint8(0), err + } + return uint8(b), nil +} + +// readUint16 reads bytes representing a 16bit unsigned integer. +func (dec *Decoder) readUint16() (uint16, error) { + dec.ensureAlignment(SizeUint16) + b, err := dec.readBytes(SizeUint16) + if err != nil { + return uint16(0), err + } + return dec.ch.Endianness.Uint16(b), nil +} + +// readUint32 reads bytes representing a 32bit unsigned integer. +func (dec *Decoder) readUint32() (uint32, error) { + dec.ensureAlignment(SizeUint32) + b, err := dec.readBytes(SizeUint32) + if err != nil { + return uint32(0), err + } + return dec.ch.Endianness.Uint32(b), nil +} + +// readUint32 reads bytes representing a 32bit unsigned integer. 
+func (dec *Decoder) readUint64() (uint64, error) { + dec.ensureAlignment(SizeUint64) + b, err := dec.readBytes(SizeUint64) + if err != nil { + return uint64(0), err + } + return dec.ch.Endianness.Uint64(b), nil +} + +func (dec *Decoder) readInt8() (int8, error) { + dec.ensureAlignment(SizeUint8) + b, err := dec.readBytes(SizeUint8) + if err != nil { + return 0, err + } + var i int8 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt16() (int16, error) { + dec.ensureAlignment(SizeUint16) + b, err := dec.readBytes(SizeUint16) + if err != nil { + return 0, err + } + var i int16 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt32() (int32, error) { + dec.ensureAlignment(SizeUint32) + b, err := dec.readBytes(SizeUint32) + if err != nil { + return 0, err + } + var i int32 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt64() (int64, error) { + dec.ensureAlignment(SizeUint64) + b, err := dec.readBytes(SizeUint64) + if err != nil { + return 0, err + } + var i int64 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +// https://en.wikipedia.org/wiki/IEEE_754-1985 +func (dec *Decoder) readFloat32() (f float32, err error) { + dec.ensureAlignment(SizeSingle) + b, err := dec.readBytes(SizeSingle) + if err != nil { + return + } + bits := dec.ch.Endianness.Uint32(b) + f = math.Float32frombits(bits) + return +} + +func (dec *Decoder) readFloat64() (f float64, err error) { + dec.ensureAlignment(SizeDouble) + b, err := dec.readBytes(SizeDouble) + if err != nil { + return + } + bits := dec.ch.Endianness.Uint64(b) + f = math.Float64frombits(bits) + return +} + +// NDR 
enforces NDR alignment of primitive data; that is, any primitive of size n octets is aligned at a octet stream +// index that is a multiple of n. (In this version of NDR, n is one of {1, 2, 4, 8}.) An octet stream index indicates +// the number of an octet in an octet stream when octets are numbered, beginning with 0, from the first octet in the +// stream. Where necessary, an alignment gap, consisting of octets of unspecified value, precedes the representation +// of a primitive. The gap is of the smallest size sufficient to align the primitive. +func (dec *Decoder) ensureAlignment(n int) { + p := dec.size - dec.r.Buffered() + if s := p % n; s != 0 { + dec.r.Discard(n - s) + } +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go new file mode 100644 index 00000000..9ee59fb1 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go @@ -0,0 +1,61 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// type MyBytes []byte +// implement RawBytes interface + +const ( + sizeMethod = "Size" +) + +// RawBytes interface should be implemented if reading just a number of bytes from the NDR stream +type RawBytes interface { + Size(interface{}) int +} + +func rawBytesSize(parent reflect.Value, v reflect.Value) (int, error) { + sf := v.MethodByName(sizeMethod) + if !sf.IsValid() { + return 0, fmt.Errorf("could not find a method called %s on the implementation of RawBytes", sizeMethod) + } + in := []reflect.Value{parent} + f := sf.Call(in) + if f[0].Kind() != reflect.Int { + return 0, errors.New("the RawBytes size function did not return an integer") + } + return int(f[0].Int()), nil +} + +func addSizeToTag(parent reflect.Value, v reflect.Value, tag reflect.StructTag) (reflect.StructTag, error) { + size, err := rawBytesSize(parent, v) + if err != nil { + return tag, err + } + ndrTag := parseTags(tag) + ndrTag.Map["size"] = strconv.Itoa(size) + return ndrTag.StructTag(), nil +} 
+ +func (dec *Decoder) readRawBytes(v reflect.Value, tag reflect.StructTag) error { + ndrTag := parseTags(tag) + sizeStr, ok := ndrTag.Map["size"] + if !ok { + return errors.New("size tag not available") + } + size, err := strconv.Atoi(sizeStr) + if err != nil { + return fmt.Errorf("size not valid: %v", err) + } + b, err := dec.readBytes(size) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b).Convert(v.Type())) + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go new file mode 100644 index 00000000..b7a910b3 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go @@ -0,0 +1,70 @@ +package ndr + +import ( + "fmt" + "reflect" +) + +const ( + subStringArrayTag = `ndr:"varying,X-subStringArray"` + subStringArrayValue = "X-subStringArray" +) + +func uint16SliceToString(a []uint16) string { + s := make([]rune, len(a), len(a)) + for i := range s { + s[i] = rune(a[i]) + } + if len(s) > 0 { + // Remove any null terminator + if s[len(s)-1] == rune(0) { + s = s[:len(s)-1] + } + } + return string(s) +} + +func (dec *Decoder) readVaryingString(def *[]deferedPtr) (string, error) { + a := new([]uint16) + v := reflect.ValueOf(a) + var t reflect.StructTag + err := dec.fillUniDimensionalVaryingArray(v.Elem(), t, def) + if err != nil { + return "", err + } + s := uint16SliceToString(*a) + return s, nil +} + +func (dec *Decoder) readConformantVaryingString(def *[]deferedPtr) (string, error) { + a := new([]uint16) + v := reflect.ValueOf(a) + var t reflect.StructTag + err := dec.fillUniDimensionalConformantVaryingArray(v.Elem(), t, def) + if err != nil { + return "", err + } + s := uint16SliceToString(*a) + return s, nil +} + +func (dec *Decoder) readStringsArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, _ := sliceDimensions(v.Type()) + ndrTag := parseTags(tag) + var m []int + //var ms int + if ndrTag.HasValue(TagConformant) { + for i := 0; i < d; i++ { + 
m = append(m, int(dec.precedingMax())) + } + //common max size + _ = dec.precedingMax() + //ms = int(n) + } + tag = reflect.StructTag(subStringArrayTag) + err := dec.fillVaryingArray(v, tag, def) + if err != nil { + return fmt.Errorf("could not read string array: %v", err) + } + return nil +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go new file mode 100644 index 00000000..01657e0a --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go @@ -0,0 +1,69 @@ +package ndr + +import ( + "fmt" + "reflect" + "strings" +) + +const ndrNameSpace = "ndr" + +type tags struct { + Values []string + Map map[string]string +} + +// parse the struct field tags and extract the ndr related ones. +// format of tag ndr:"value,key:value1,value2" +func parseTags(st reflect.StructTag) tags { + s := st.Get(ndrNameSpace) + t := tags{ + Values: []string{}, + Map: make(map[string]string), + } + if s != "" { + ndrTags := strings.Trim(s, `"`) + for _, tag := range strings.Split(ndrTags, ",") { + if strings.Contains(tag, ":") { + m := strings.SplitN(tag, ":", 2) + t.Map[m[0]] = m[1] + } else { + t.Values = append(t.Values, tag) + } + } + } + return t +} + +func appendTag(t reflect.StructTag, s string) reflect.StructTag { + ts := t.Get(ndrNameSpace) + ts = fmt.Sprintf(`%s"%s,%s"`, ndrNameSpace, ts, s) + return reflect.StructTag(ts) +} + +func (t *tags) StructTag() reflect.StructTag { + mv := t.Values + for key, val := range t.Map { + mv = append(mv, key+":"+val) + } + s := ndrNameSpace + ":" + `"` + strings.Join(mv, ",") + `"` + return reflect.StructTag(s) +} + +func (t *tags) delete(s string) { + for i, x := range t.Values { + if x == s { + t.Values = append(t.Values[:i], t.Values[i+1:]...) 
+ } + } + delete(t.Map, s) +} + +func (t *tags) HasValue(s string) bool { + for _, v := range t.Values { + if v == s { + return true + } + } + return false +} diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/union.go b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go new file mode 100644 index 00000000..6a657fa6 --- /dev/null +++ b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go @@ -0,0 +1,57 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" +) + +// Union interface must be implemented by structs that will be unmarshaled into from the NDR byte stream union representation. +// The union's discriminating tag will be passed to the SwitchFunc method. +// The discriminating tag field must have the struct tag: `ndr:"unionTag"` +// If the union is encapsulated the discriminating tag field must have the struct tag: `ndr:"encapsulated"` +// The possible value fields that can be selected from must have the struct tag: `ndr:"unionField"` +type Union interface { + SwitchFunc(t interface{}) string +} + +// Union related constants such as struct tag values +const ( + unionSelectionFuncName = "SwitchFunc" + TagEncapsulated = "encapsulated" + TagUnionTag = "unionTag" + TagUnionField = "unionField" +) + +func (dec *Decoder) isUnion(field reflect.Value, tag reflect.StructTag) (r reflect.Value) { + ndrTag := parseTags(tag) + if !ndrTag.HasValue(TagUnionTag) { + return + } + r = field + // For a non-encapsulated union, the discriminant is marshalled into the transmitted data stream twice: once as the + // field or parameter, which is referenced by the switch_is construct, in the procedure argument list; and once as + // the first part of the union representation. 
+ if !ndrTag.HasValue(TagEncapsulated) { + dec.r.Discard(int(r.Type().Size())) + } + return +} + +// unionSelectedField returns the field name of which of the union values to fill +func unionSelectedField(union, discriminant reflect.Value) (string, error) { + if !union.Type().Implements(reflect.TypeOf(new(Union)).Elem()) { + return "", errors.New("struct does not implement union interface") + } + args := []reflect.Value{discriminant} + // Call the SelectFunc of the union struct to find the name of the field to fill with the value selected. + sf := union.MethodByName(unionSelectionFuncName) + if !sf.IsValid() { + return "", fmt.Errorf("could not find a selection function called %s in the unions struct representation", unionSelectionFuncName) + } + f := sf.Call(args) + if f[0].Kind() != reflect.String || f[0].String() == "" { + return "", fmt.Errorf("the union select function did not return a string for the name of the field to fill") + } + return f[0].String(), nil +} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000..1eb75ef6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 00000000..ea7324da --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. 
+ +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. 
+ +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000..f65eb390 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 
+ +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000..43e46361 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. 
+var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. 
+func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000..abade2d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. 
+func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000..b69237c9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,684 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. 
+ return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. 
+func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. 
+ for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) 
<< bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if uint16(charnum) > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. 
+ if cap(s.ct.tableSymbol) < int(tableSize) { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. 
+func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd 
- sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. +func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if 
count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. +func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < int(tableSize) { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. 
+func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. 
+func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. 
+func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = uint16(in.getBits(tableLog)) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000..535cbadf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. 
+package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. 
+ DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. 
+func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 00000000..b3d26295 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 00000000..8b6e5c66 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,89 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. 
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface that allows you to compress single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                 |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                           |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)      |
+| `(error)`           | An internal error occurred.                                                 |
+
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+This will initialize the decoding tables.
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+ +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 00000000..a4979e88 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,329 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +func (b *bitReader) advance(n uint8) { + b.bitsRead += n +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. 
+// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000..6bce4e87 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,210 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. 
+// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. 
+func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000..50bcdf6e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,54 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Int32 returns a little endian int32 starting at current offset. 
+func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000..f9ed5f83 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,657 @@ +package huff0 + +import ( + "fmt" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. 
+ return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := int16(startNode) + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + 
s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < 
maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := uint8(maxNbBits) + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000..41703bba --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1164 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. 
+const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. +func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. 
+ weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: 
uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. 
+type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
+ var buf [256]byte + var off uint8 + + shift := (8 - d.actualTableLog) & 7 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
+ var buf [256]byte + var off uint8 + + const shift = 0 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + 
// There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := br.off*8 + uint(64-br.bitsRead) + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (8 - d.actualTableLog) & 7 + + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + 
br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 0 + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + 
br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1<>8) == byte(sym) { + fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) + errs++ + break + } + } + if errs == 0 { + broken-- + } + continue + } + // Unused bits in input + ub := tablelog - enc.nBits + top := enc.val << ub + // decoder looks at top bits. + dec := dt[top] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 0 { + fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. 
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000..7ec2022b --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,273 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. +package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. 
+ ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + br byteReader + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. 
+ TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. + prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < 
huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 00000000..042091d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. 
+# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 00000000..931ae316 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. 
+ +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 00000000..72efb035 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. 
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go similarity index 100% rename from vendor/github.com/golang/snappy/decode_amd64.go rename to vendor/github.com/klauspost/compress/snappy/decode_amd64.go diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 00000000..1c66e372 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,482 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. 
The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! 
Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. 
+ MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + CMPQ SI, R13 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. 
+ +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. 
+ // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. 
In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 00000000..94a96c5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
+ // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
+ d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. 
+ return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go similarity index 100% rename from vendor/github.com/golang/snappy/encode_amd64.go rename to vendor/github.com/klauspost/compress/snappy/encode_amd64.go diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. 
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. 
+ MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. 
+ CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. 
Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per 
the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. 
+ // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. 
+ // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go new file mode 100644 index 00000000..dbcae905 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
+ s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. 
We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 00000000..d24eb4b4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. 
>>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000..74a36689 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. 
+ +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000..7680bfe1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,417 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd + + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. 
This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. 
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+ +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. +// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. 
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73101992 643 313.87 +silesia.tar zskp 2 211947520 67504318 969 208.38 +silesia.tar zskp 3 211947520 65177448 1899 106.44 +silesia.tar zskp 4 211947520 61381950 8115 24.91 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1654 122.21 +silesia.tar gzkp 1 211947520 80369488 1168 173.06 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 235022249 3088 590.30 +gob-stream zskp 2 1911399616 205669791 3786 481.34 +gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zskp 4 1911399616 171537212 32113 56.76 +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 +gob-stream gzstd 1 1911399616 357382641 10251 177.82 +gob-stream gzkp 1 1911399616 362156523 5695 320.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 
+http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343848582 3609 264.18 +enwik9 zskp 2 1000000000 317276632 5746 165.97 +enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zskp 4 1000000000 276609671 44029 21.66 +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 +enwik9 gzstd 1 1000000000 382578136 9604 99.30 +enwik9 gzkp 1 1000000000 383825945 6544 145.73 + +Highly compressible JSON file. +https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 +github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 +github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54 +github-june-2days-2019.json zskp 4 6273951764 512796117 97791 61.18 +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 +github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 +github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 +rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 +rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75 +rawstudio-mint14.tar zskp 4 8558382592 3027332295 486243 16.79 +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 
8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
+rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
+nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
+nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53
+nyc-taxi-data-10M.csv zskp 4 3325605752 495986829 89368 35.49
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
+nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages, big streams of data and smaller in-memory buffers.
+There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`.
+
+For streaming use a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+ _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. +See "Allocation-less operation" below. + +For decoding buffers, it could look something like this: + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a reader that caches decompressors. +// For this operation type we supply a nil Reader. +var decoder, _ = zstd.NewReader(nil) + +// Decompress a buffer. We don't supply a destination buffer, +// so it will be allocated by the decoder. +func Decompress(src []byte) ([]byte, error) { + return decoder.DecodeAll(src, nil) +} +``` + +Both of these cases should provide the functionality needed. +The decoder can be used for *concurrent* decompression of multiple buffers. +It will only allow a certain number of concurrent operations to run. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. + +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. + +The dictionary will be used automatically for the data that specifies them. +A re-used Decoder will still contain the dictionaries registered. + +When registering multiple dictionaries with the same ID, the last one will be used. + +It is possible to use dictionaries when compressing data. + +To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used +and it will likely be used even if it doesn't improve compression. + +The used dictionary must be used to decompress the content. 
+ +For any real gains, the dictionary should be built with similar data. +If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. +Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. +For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). + +For now there is a fixed startup performance penalty for compressing content with dictionaries. +This will likely be improved over time. Just be aware to test performance when implementing. + +### Allocation-less operation + +The decoder has been designed to operate without allocations after a warmup. + +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. +When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder operates on + +* One goroutine reads input and splits the input to several block decoders. +* A number of decoders will decode blocks. +* A goroutine coordinates these blocks and sends history from one to the next. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. 
+ +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 2 cores effectively. + + +### Benchmarks + +These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). + +The first two are streaming decodes and the last are smaller inputs. + +``` +BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op +BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op + +BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op +BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op + +Concurrent performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op + +BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2020, but this may be out of date. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). 
+ +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000..85445853 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReader) getBitsFast(n uint8) int { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return int(v) +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000..303ae90f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + 
byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000..b51d922b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,739 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header + maxCompressedLiteralSize = 1 << 18 + maxRLELiteralSize = 1 << 20 + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + history chan *history + input chan struct{} + result chan decodeOutput + sequenceBuf []seq + err error + decWG sync.WaitGroup + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + // Block is RLE, this is the size. + RLESize uint32 + tmp [4]byte + + Type blockType + + // Is this the last block of a frame? 
+ Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + result: make(chan decodeOutput, 1), + input: make(chan struct{}, 1), + history: make(chan *history, 1), + } + b.decWG.Add(1) + go b.startDecoder() + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp := br.readSmall(3) + if tmp == nil { + if debug { + println("Reading block header:", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxBlockSize + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debug { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSize + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debug { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + case blockTypeRaw: + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. 
+ if cap(b.dataStorage) < cSize { + if b.lowMem { + b.dataStorage = make([]byte, 0, cSize) + } else { + b.dataStorage = make([]byte, 0, maxBlockSize) + } + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + var err error + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debug { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err + b.input <- struct{}{} +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { + close(b.input) + close(b.history) + close(b.result) + b.decWG.Wait() +} + +// decodeAsync will prepare decoding the block when it receives input. +// This will separate output and history. +func (b *blockDec) startDecoder() { + defer b.decWG.Done() + for range b.input { + //println("blockDec: Got block input") + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + o := decodeOutput{ + d: b, + b: b.dst[:b.RLESize], + err: nil, + } + v := b.data[0] + for i := range o.b { + o.b[i] = v + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeRaw: + o := decodeOutput{ + d: b, + b: b.data, + err: nil, + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeCompressed: + b.dst = b.dst[:0] + err := b.decodeCompressed(nil) + o := decodeOutput{ + d: b, + b: b.dst, + err: err, + } + if debug { + println("Decompressed to", len(b.dst), "bytes, error:", err) + } + b.result <- o + case blockTypeReserved: + // Used for returning errors. 
+ <-b.history + b.result <- decodeOutput{ + d: b, + b: nil, + err: b.err, + } + default: + panic("Invalid block type") + } + if debug { + println("blockDec: Finished block") + } + } +} + +// decodeAsync will prepare decoding the block when it receives the history. +// If history is provided, it will not fetch it from the channel. +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + b.dst = hist.b + hist.b = nil + err := b.decodeCompressed(hist) + if debug { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + hist.b = b.dst + b.dst = saved + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +// decodeCompressed will start decompressing a block. +// If no history is supplied the decoder will decodeAsync as much as possible +// before fetching from blockDec.history +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + delayedHistory := hist == nil + + if delayedHistory { + // We must always grab history. + defer func() { + if hist == nil { + <-b.history + } + }() + } + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return ErrBlockTooSmall + } + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). 
Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debug { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + 
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debug { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debug { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. 
+ if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debug { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if 
debug { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debug { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debug { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literals before the byte history is available. + // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. 
+ // So not much obvious gain here. + + if hist.huffTree == nil { + return errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. + huff = hist.huffTree + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return err + } + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + } else { + if hist.huffTree != nil && huff != nil { + if hist.dict == nil || hist.dict.litEnc != hist.huffTree { + huffDecoderPool.Put(hist.huffTree) + } + hist.huffTree = nil + } + } + if huff != nil { + hist.huffTree = huff + } + if debug { + println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + } + + if nSeqs == 0 { + // Decompressed content is defined entirely as Literals Section content. + b.dst = append(b.dst, literals...) + if delayedHistory { + hist.append(literals) + } + return nil + } + + seqs, err := seqs.mergeHistory(&hist.decoders) + if err != nil { + return err + } + if debug { + println("History merged ok") + } + br := &bitReader{} + if err := br.init(in); err != nil { + return err + } + + // TODO: Investigate if sending history without decoders are faster. + // This would allow the sequences to be decoded async and only have to construct stream history. + // If only recent offsets were not transferred, this would be an obvious win. 
+ // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history any more. + if hist.dict != nil { + hist.dict.content = nil + } + } + + if err := seqs.initialize(br, hist, literals, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + err = seqs.decode(nSeqs, br, hbytes) + if err != nil { + return err + } + if !br.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) + } + + err = br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = seqs.out + seqs.out, seqs.literals, seqs.hist = nil, nil, nil + + if !delayedHistory { + // If we don't have delayed history, no need to update. + hist.recentOffsets = seqs.prevOffset + return nil + } + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debug { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000..c85c4025 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,855 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + last bool + + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if cap(b.literals) < maxCompressedLiteralSize { + b.literals = make([]byte, 0, maxCompressedLiteralSize) + } + const defSeqs = 200 + b.literals = b.literals[:0] + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. 
+func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debug { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debug { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. 
+func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debug { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debug { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. 
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debug { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + default: + return err + case nil: + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debug { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. 
+ b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debug { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debug { + println("Adding literals RLE") + } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err + case nil: + // Compressed litLen... + if reUsed { + if debug { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debug { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debug { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) 
+ b.litEnc.Reuse = huff0.ReusePolicyAllow + if debug { + println("Adding literals compressed") + } + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debug { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debug { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debug { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debug { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debug { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debug { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debug { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debug { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? 
+ wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { + // No need to flush (common) + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is 8 for all. + of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // We checked that all can stay within 32 bits + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } else { + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is below 8 for each. 
+ of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // ml+ll = max 32 bits total + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + err = wr.close() + if err != nil { + return err + } + b.output = wr.out + + if len(b.output)-3-bhOffset >= b.size { + // Maybe even add a bigger margin. + b.litEnc.Reuse = huff0.ReusePolicyNone + return errIncompressible + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debug { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram()[:256] + ofH := b.coders.ofEnc.Histogram()[:256] + mlH := b.coders.mlEnc.Histogram()[:256] + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i, seq := range b.sequences { + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + b.sequences[i] = seq + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := 
range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000..01a01e48 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000..658ef783 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "io/ioutil" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns nil if no more input is available. + readSmall(n int) []byte + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). 
use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil + } + r := bb[:n] + *b = bb[n:] + return r +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) remain() []byte { + return *b +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int) error { + bb := *b + if len(bb) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if n2 != n { + if debug { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil + } + return r.tmp[:n] +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000..2c4fca17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. 
+func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000..87896c5e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,202 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // Window Size the window of data to keep while decoding. + // Will only be set if HasFCS is false. + WindowSize uint64 + + // Frame content size. + // Expected size of the entire frame. + FrameContentSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // Skippable will be true if the frame is meant to be skipped. + // No other information will be populated. 
+ Skippable bool + + // If set there is a checksum present for the block content. + HasCheckSum bool + + // If this is true FrameContentSize will have a valid value + HasFCS bool + + SingleSegment bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + *h = Header{Skippable: true} + return nil + } + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + + // Clear output + *h = Header{} + fhd, in := in[0], in[1:] + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch size { + case 1: + 
h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp, in := in[:3], in[3:] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000..1d41c25d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,561 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Streams ready to be decoded. + stream chan decodeStream + + // Current read position used for Reader functionality. + current decoderState + + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. 
+type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel chan struct{} + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. 
+func (d *Decoder) Read(p []byte) (int, error) { + if d.stream == nil { + return 0, ErrDecoderNilInput + } + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, nil + } + } + } + if len(d.current.b) > 0 { + if debug { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debug { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + if r == nil { + d.current.err = ErrDecoderNilInput + d.current.flushed = true + return nil + } + + if d.stream == nil { + d.stream = make(chan decodeStream, 1) + d.streamWg.Add(1) + go d.startStreamDecoder(d.stream) + } + + // If bytes buffer and < 1MB, do sync decoding anyway. 
+ if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + var bb2 byter + bb2 = bb + if debug { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.current.b) > 0 { + dst = d.current.b + } + + dst, err := d.DecodeAll(b, dst[:0]) + if err == nil { + err = io.EOF + } + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debug { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + + // Remove current block. + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.cancel = make(chan struct{}) + d.current.flushed = false + d.current.d = nil + + d.stream <- decodeStream{ + r: r, + output: d.current.output, + cancel: d.current.cancel, + } + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + println("cancelling current") + close(d.current.cancel) + d.current.cancel = nil + } + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for { + select { + case v := <-d.current.output: + if v.d != nil { + if debug { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + if v.err == errEndOfStream { + println("current flushed") + d.current.flushed = true + return + } + } + } +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. 
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + if d.stream == nil { + return 0, ErrDecoderNilInput + } + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && d.current.err == nil { + d.current.err = err2 + break + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.current.err == ErrDecoderClosed { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. + block := <-d.decoders + frame := block.localFrame + defer func() { + if debug { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err == io.EOF { + if debug { + println("frame reset return EOF") + } + return dst, nil + } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + frame.history.setDict(&dict) + } + if err != nil { + return dst, err + } + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } + if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { + // Never preallocate moe than 1 GB up front. 
+ if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + copy(dst2, dst) + dst = dst2 + } + } + if cap(dst) == 0 { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if len(frame.bBuf) == 0 { + if debug { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } + if d.current.err != nil { + // Keep error state. + return blocking + } + + if blocking { + d.current.decodeOutput = <-d.current.output + } else { + select { + case d.current.decodeOutput = <-d.current.output: + default: + return false + } + } + if debug { + println("got", len(d.current.b), "bytes, error:", d.current.err) + } + return true +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.stream != nil { + close(d.stream) + d.streamWg.Wait() + d.stream = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +type decodeStream struct { + r io.Reader + + // Blocks ready to be written to output. + output chan decodeOutput + + // cancel reading from the input + cancel chan struct{} +} + +// errEndOfStream indicates that everything from the stream was read. +var errEndOfStream = errors.New("end-of-stream") + +// Create Decoder: +// Spawn n block decoders. These accept tasks to decode a block. +// Create goroutine that handles stream processing, this will send history to decoders as they are available. +// Decoders update the history as they decode. +// When a block is returned: +// a) history is sent to the next decoder, +// b) content written to CRC. +// c) return data to WRITER. 
+// d) wait for next block to return data. +// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. +func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { + defer d.streamWg.Done() + frame := newFrameDec(d.o) + for stream := range inStream { + if debug { + println("got new stream") + } + br := readerWrapper{r: stream.r} + decodeStream: + for { + frame.history.reset() + err := frame.reset(&br) + if debug && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err != nil { + stream.output <- decodeOutput{ + err: err, + } + break + } + if debug { + println("starting frame decoder") + } + + // This goroutine will forward history between frames. + frame.frameDone.Add(1) + frame.initAsync() + + go frame.startDecoder(stream.output) + decodeFrame: + // Go through all blocks of the frame. + for { + dec := <-d.decoders + select { + case <-stream.cancel: + if !frame.sendErr(dec, io.EOF) { + // To not let the decoder dangle, send it back. + stream.output <- decodeOutput{d: dec} + } + break decodeStream + default: + } + err := frame.next(dec) + switch err { + case io.EOF: + // End of current frame, no error + println("EOF on next block") + break decodeFrame + case nil: + continue + default: + println("block decoder returned", err) + break decodeStream + } + } + // All blocks have started decoding, check if there are more frames. 
+ println("waiting for done") + frame.frameDone.Wait() + println("done waiting...") + } + frame.frameDone.Wait() + println("Sending EOS") + stream.output <- decodeOutput{err: errEndOfStream} + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000..284d3844 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,84 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + dicts []dict +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + } + o.maxDecodedSize = 1 << 63 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n <= 0 { + return fmt.Errorf("Concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. 
+// This can be used to control memory usage of potentially hostile content. +// For streaming operations, the maximum window size is capped at 1<<30 bytes. +// Maximum and default is 1 << 63 bytes. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// If several dictionaries with the same ID is provided the last one will be used. +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, *d) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000..fa25a18d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,122 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// DictContentSize returns the dictionary content size or 0 if d is nil. 
+func (d *dict) DictContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 00000000..b1b7c6e6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,155 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. 
+func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int) int32 { + if size > 0 && size < int(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + l := e.maxMatchOff * 2 + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } else { + if cap(e.hist) < int(e.maxMatchOff*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { + // Extend the match to be as long as possible. 
+ return int32(matchLen(src[s:], src[t:])) +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { + l := e.maxMatchOff*2 + int32(d.DictContentSize()) + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 00000000..c4baa42c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,484 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math/bits" +) + +const ( + bestLongTableBits = 20 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 16 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table +) + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = prevEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 12 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + type match struct { + offset int32 + s int32 + length int32 + rep int32 + } + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{offset: offset, s: s} + } + return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + } + + bestOf := func(a, b match) match { + aScore := b.s - a.s + a.length + bScore := a.s - b.s + b.length + if a.rep < 0 { + aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8 + } + if b.rep < 0 { + bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8 + } + if aScore >= bScore { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hash8(cv, bestLongTableBits) + nextHashS := hash4x64(cv, bestShortTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + if canRepeat && best.length < goodEnough { + best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) + best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) + best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) + best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) + best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) + best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + } + // Load next and check... 
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hash8(cv, bestLongTableBits)] + candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] + + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hash8(cv0, bestLongTableBits) + h1 := hash4x64(cv0, bestShortTableBits) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
+ l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hash8(cv0, bestLongTableBits) + h1 := hash4x64(cv0, bestShortTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash4x64(cv, bestShortTableBits) + nextHashL := hash8(cv, bestLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.Encode(blk, src) +} + +// ResetDict will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash4x64(cv, hashLog) // 0 -> 4 + nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5 + nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6 + nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if 
len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hash8(cv, bestLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hash8(cv, bestLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000..94a5343d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,595 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry + dictTable []tableEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. 
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. 
+ offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. 
+ seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.Encode(blk, src) +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash5(cv, hashLog) // 0 -> 4 + nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 + nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 + nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, 
+ } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hash8(cv, betterLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hash8(cv, betterLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000..19eebf66 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,713 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. 
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. 
+ offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + } + // Reset table to initial state + e.cur = e.maxMatchOff + copy(e.longTable[:], e.dictLongTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000..0b301df4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,661 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "math/bits" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry + dictTable []tableEntry +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. 
+ // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
+ //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. 
+ cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debug { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. 
+ // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + // length := 4 + e.matchlen(s+6, repIndex+4, src) + // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + var length int32 + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. 
+ //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash6(cv, hashLog) // 0 -> 5 + nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 + nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + } + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000..f5759211 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,570 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. +type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. 
+func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.writeErr = nil +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) 
+ return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: 0, + WindowSize: uint32(s.encoder.WindowSize(0)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. 
+ if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.wg.Add(1) + go func(src []byte) { + if debug { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. 
+// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debug { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debug { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + default: + if debug { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + case nil: + } + if len(src) > 0 { + if debug { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. 
+func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. 
+ single := len(src) < 1<<20 && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(len(src))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000..a7312f42 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,290 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + // use less ram: true for now, but may change. + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: 1 << 16, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: true, + } +} + +// encoder returns an encoder with the selected options. 
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedDefault: + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}} + case SpeedBetterCompression: + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + case SpeedFastest: + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. 
+func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + o.pad = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). 
+ SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + case level >= 10: + return SpeedBetterCompression + } + return SpeedDefault +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. 
+func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. 
+// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// The encoder *may* choose to use no dictionary instead for certain payloads. +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 00000000..fc4a566d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,494 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc hash.Hash64 + offset int64 + + WindowSize uint64 + + // maxWindowSize is the maximum windows size to support. + // should never be bigger than max-int. 
+ maxWindowSize uint64 + + // In order queue of blocks being decoded. + decoding chan *blockDec + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + frameDone sync.WaitGroup + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool + + // asyncRunning indicates whether the async routine processes input on 'decoding'. + asyncRunningMu sync.Mutex + asyncRunning bool +} + +const ( + // The minimum Window_Size is 1 KB. + MinWindowSize = 1 << 10 + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + d := frameDec{ + o: o, + maxWindowSize: MaxWindowSize, + } + if d.maxWindowSize > o.maxDecodedSize { + d.maxWindowSize = o.maxDecodedSize + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var b []byte + for { + b = br.readSmall(4) + if b == nil { + return io.EOF + } + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if debug { + println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. 
+ break + } + // Read size to skip + b = br.readSmall(4) + if b == nil { + println("Reading Frame Size EOF") + return io.ErrUnexpectedEOF + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err := br.skipN(int(n)) + if err != nil { + if debug { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(b, frameMagic) { + println("Got magic numbers: ", b, "want:", frameMagic) + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + println("Reading Frame_Header_Descriptor", err) + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + println("Reading Window_Descriptor", err) + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + b = br.readSmall(int(size)) + if b == nil { + if debug { + println("Reading Dictionary_ID", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debug { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". 
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b := br.readSmall(fcsSize) + if b == nil { + println("Reading Frame content", io.ErrUnexpectedEOF) + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debug { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + } + + if d.WindowSize > d.maxWindowSize { + printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + return ErrWindowSizeExceeded + } + // The minimum Window_Size is 1 KB. 
+ if d.WindowSize < MinWindowSize { + println("got window size: ", d.WindowSize) + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if d.o.lowMem && d.history.windowSize < maxBlockSize { + d.history.maxSize = d.history.windowSize * 2 + } else { + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debug { + printf("decoding new block %p:%p", block, block.data) + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + d.sendErr(block, err) + return err + } + block.input <- struct{}{} + if debug { + println("next block:", block) + } + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return nil + } + if block.Last { + // We indicate the frame is done by sending io.EOF + d.decoding <- block + return io.EOF + } + d.decoding <- block + return nil +} + +// sendEOF will queue an error block on the frame. +// This will cause the frame decoder to return when it encounters the block. +// Returns true if the decoder was added. +func (d *frameDec) sendErr(block *blockDec, err error) bool { + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return false + } + + println("sending error", err.Error()) + block.sendErr(err) + d.decoding <- block + return true +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. 
+ tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + // We can overwrite upper tmp now + want := d.rawInput.readSmall(4) + if want == nil { + println("CRC missing?") + return io.ErrUnexpectedEOF + } + + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debug { + println("CRC ok", tmp[:]) + } + return nil +} + +func (d *frameDec) initAsync() { + if !d.o.lowMem && !d.SingleSegment { + // set max extra size history to 10MB. + d.history.maxSize = d.history.windowSize + maxBlockSize*5 + } + // re-alloc if more than one extra block size. + if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.history.b) < d.history.maxSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.decoding) < d.o.concurrent { + d.decoding = make(chan *blockDec, d.o.concurrent) + } + if debug { + h := d.history + printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) + } + d.asyncRunningMu.Lock() + d.asyncRunning = true + d.asyncRunningMu.Unlock() +} + +// startDecoder will start decoding blocks and write them to the writer. +// The decoder will stop as soon as an error occurs or at end of frame. +// When the frame has finished decoding the *bufio.Reader +// containing the remaining input will be sent on frameDec.frameDone. +func (d *frameDec) startDecoder(output chan decodeOutput) { + written := int64(0) + + defer func() { + d.asyncRunningMu.Lock() + d.asyncRunning = false + d.asyncRunningMu.Unlock() + + // Drain the currently decoding. + d.history.error = true + flushdone: + for { + select { + case b := <-d.decoding: + b.history <- &d.history + output <- <-b.result + default: + break flushdone + } + } + println("frame decoder done, signalling done") + d.frameDone.Done() + }() + // Get decoder for first block. 
+ block := <-d.decoding + block.history <- &d.history + for { + var next *blockDec + // Get result + r := <-block.result + if r.err != nil { + println("Result contained error", r.err) + output <- r + return + } + if debug { + println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) + d.offset += int64(len(r.b)) + } + if !block.Last { + // Send history to next block + select { + case next = <-d.decoding: + if debug { + println("Sending ", len(d.history.b), "bytes as history") + } + next.history <- &d.history + default: + // Wait until we have sent the block, so + // other decoders can potentially get the decoder. + next = nil + } + } + + // Add checksum, async to decoding. + if d.HasCheckSum { + n, err := d.crc.Write(r.b) + if err != nil { + r.err = err + if n != len(r.b) { + r.err = io.ErrShortWrite + } + output <- r + return + } + } + written += int64(len(r.b)) + if d.SingleSegment && uint64(written) > d.FrameContentSize { + println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) + r.err = ErrFrameSizeExceeded + output <- r + return + } + if block.Last { + r.err = d.checkCRC() + output <- r + return + } + output <- r + if next == nil { + // There was no decoder available, we wait for one now that we have sent to the writer. + if debug { + println("Sending ", len(d.history.b), " bytes as history") + } + next = <-d.decoding + next.history <- &d.history + } + block = next + } +} + +// runDecoder will create a sync decoder that will decode a block of data. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + // Store input length, so we only check new data. 
+ crcStart := len(dst) + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debug { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil || dec.Last { + break + } + if uint64(len(d.history.b)) > d.o.maxDecodedSize { + err = ErrDecoderSizeExceeded + break + } + if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { + println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + err = ErrFrameSizeExceeded + break + } + } + dst = d.history.b + if err == nil { + if d.HasCheckSum { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000..4ef7f5a3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) 
+	// fhd is the Frame_Header_Descriptor byte: bit 2 = checksum flag,
+	// bit 5 = single-segment flag, bits 0-1 = dictionary-ID field size,
+	// bits 6-7 = frame-content-size field size.
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	// Encode the dictionary ID in the smallest field that fits (1, 2 or 4 bytes).
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	// fcs selects the width of the content-size field:
+	// 0 -> 0 or 1 byte, 1 -> 2 bytes, 2 -> 4 bytes, 3 -> 8 bytes.
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		// Window descriptor: the window-size exponent goes in the upper 3 bits.
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		// Sizes in [256, 65536+256) are stored as (size - 256) in 2 bytes.
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return a total size to be added for written
+// to be divisible by multiple.
+// The value will always be > skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	// Pad up to the next multiple; a skippable frame needs at least
+	// skippableFrameHeader bytes, so step further if the gap is too small.
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	// Skippable-frame magic number 0x184D2A50, written little endian.
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	// Frame size field: payload size, excluding magic and the size field itself.
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	// Payload content is taken from r; a short read surfaces as ReadFull's error.
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 00000000..e6d3d49b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,385 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 9
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. 
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits 
+ } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + // println(s.norm[:s.symbolLen], s.symbolLen) + return s.buildDtable() +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. 
+type decSymbol uint64
+
+// newDecSymbol packs a decoding-table entry into a single uint64.
+// Layout: bits 0-7 nbBits, bits 8-15 addBits, bits 16-31 newState,
+// bits 32-63 baseline.
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+// nbBits returns the number of bits to read for the low part of the next state.
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+// addBits returns the number of additional bits to read for the symbol value.
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+// newState returns the state offset base.
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+// baseline returns the base value of the decoded symbol.
+func (d decSymbol) baseline() uint32 {
+	return uint32(d >> 32)
+}
+
+// baselineInt returns the base value as int, saving a conversion at callers.
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+// set fills in all four fields at once.
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+// setNBits replaces only the nbBits field (bits 0-7).
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+// setAddBits replaces only the addBits field (bits 8-15).
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+// setNewState replaces only the newState field (bits 16-31).
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+// setBaseline replaces only the baseline field (bits 32-63).
+func (d *decSymbol) setBaseline(baseline uint32) {
+	const mask = 0xffffffff
+	*d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+// setExt replaces the addBits and baseline fields in a single store.
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+	s.actualTableLog = 0
+	s.maxBits = symbol.addBits()
+	s.dt[0] = symbol
+}
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				// -1 marks a "less than one" probability symbol; it gets a
+				// single cell placed at the high end of the table.
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			// Each symbol occupies 'v' cells, scattered by the fixed step.
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+	tableSize := uint16(1 << s.actualTableLog)
+	s.maxBits = 0
+	for i, v := range s.dt[:tableSize] {
+		add := v.addBits()
+		if int(add) >= len(t) {
+			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
+		}
+		lu := t[add]
+		// Track the largest number of extra bits across all entries.
+		if lu.addBits > s.maxBits {
+			s.maxBits = lu.addBits
+		}
+		v.setExt(lu.addBits, lu.baseLine)
+		s.dt[i] = v
+	}
+	return nil
+}
+
+// fseState holds a decoding table and the current state of one FSE stream.
+type fseState struct {
+	dt    []decSymbol
+	state decSymbol
+}
+
+// init reads the initial state, consuming tableLog bits from the bit reader.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+	s.dt = dt
+	br.fill()
+	s.state = dt[br.getBits(tableLog)]
+}
+
+// next returns the current symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) next(br *bitReader) {
+	lowBits := uint16(br.getBits(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (s *fseState) finished(br *bitReader) bool {
+	return br.finished() && s.state.nbBits() > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (s *fseState) final() (int, uint8) {
+	return s.state.baselineInt(), s.state.addBits()
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+	return s.baselineInt(), s.addBits()
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000..aa9eba88 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. 
+type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *fseEncoder) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *fseEncoder) prepare() (*fseEncoder, error) { + if s == nil { + s = &fseEncoder{} + } + s.useRLE = false + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + return s, nil +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. 
+ if cap(s.ct.tableSymbol) < int(tableSize) { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int16(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int16(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debug { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. 
+// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. +func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= 
(s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) 
{ + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. 
+func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + 
panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. 
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000..6c17dc17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. 
+ fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. 
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000..4a752067 --- 
/dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,77 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. +// l must be >=4 and <=8. Any other value will return hash for 4 bytes. +// h should always be <32. +// Preferably h and l should be a constant. +// FIXME: This does NOT get resolved, if 'mls' is constant, +// so this cannot be used. +func hashLen(u uint64, hashLog, mls uint8) uint32 { + switch mls { + case 5: + return hash5(u, hashLog) + case 6: + return hash6(u, hashLog) + case 7: + return hash7(u, hashLog) + case 8: + return hash8(u, hashLog) + default: + return hash4x64(u, hashLog) + } +} + +// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash3(u uint32, h uint8) uint32 { + return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> ((32 - h) & 31) +} + +// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4x64(u uint64, h uint8) uint32 { + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. 
+func hash5(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000..f783e32d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,89 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + b []byte + huffTree *huff0.Scratch + recentOffsets [3]int + decoders sequenceDecs + windowSize int + maxSize int + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. 
+func (h *history) reset() { + h.b = h.b[:0] + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + h.decoders = sequenceDecs{} + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } + } + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) 
+} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000..69aa3bb5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. 
+ + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000..426b9cac --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,238 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). 
+var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. 
+func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 00000000..35318d7c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(*Digest, []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..2c9c5357 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ arg1_base+8(FP), CX + MOVQ arg1_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. 
+ MOVQ arg+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ arg1_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000..4a5a8216 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000..6f3b0cb1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. 
+func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000..1dd39e63 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,492 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
+func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(s.out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size", size) + } + if size > cap(s.out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(s.out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + s.out = append(s.out, s.literals[:ll]...) + s.literals = s.literals[ll:] + out := s.out + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(s.out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + + // we may be in dictionary. 
+ dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + mo -= len(s.dict) - dictO + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. 
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster. 
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. 
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { + for i := uint(0); i < 3; i++ { + var sNew, sHist *sequenceDec + switch i { + default: + // same as "case 0": + sNew = &s.litLengths + sHist = &hist.litLengths + case 1: + sNew = &s.offsets + sHist = &hist.offsets + case 2: + sNew = &s.matchLengths + sHist = &hist.matchLengths + } + if sNew.repeat { + if sHist.fse == nil { + return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) + } + continue + } + if sNew.fse == nil { + return nil, fmt.Errorf("sequence stream %d, no fse found", i) + } + if sHist.fse != nil && !sHist.fse.preDefined { + fseDecoderPool.Put(sHist.fse) + } + sHist.fse = sNew.fse + } + return hist, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000..36bcc3cc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,115 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. 
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + return + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000..841fd95a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/snappy" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. + ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. 
+// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = 
true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + println("snappy.Decode:", err) + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debug { + 
println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debug { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000..9056beef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,156 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// Enable extra assertions. 
+const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. 
+ ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. +func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
+ b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md index 949b77e3..09a4a35c 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md @@ -26,6 +26,8 @@ The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). 
We thank all the authors who provided code to this library: * Felix Kollmann +* Nicolas Perraut +* @dirty49374 ## License diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go index ef18d8f9..57f530ae 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -4,7 +4,6 @@ package sequences import ( "syscall" - "unsafe" ) var ( @@ -27,7 +26,7 @@ func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING } - ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) + ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode)) if ret == 0 { return err } diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go new file mode 100644 index 00000000..df61a6f2 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go @@ -0,0 +1,11 @@ +// +build linux darwin + +package sequences + +import ( + "fmt" +) + +func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error { + return fmt.Errorf("windows only package") +} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_test.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_test.go deleted file mode 100644 index aad41c5c..00000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build windows - -package sequences - -import ( - "fmt" - "os" - "syscall" - "testing" -) - -func TestStdoutSequencesOn(t *testing.T) { - err := EnableVirtualTerminalProcessing(syscall.Stdout, true) - if err != nil { - t.Fatalf("Failed to enable VTP: %v", err) - } - defer 
EnableVirtualTerminalProcessing(syscall.Stdout, false) - - fmt.Fprintf(os.Stdout, "\x1b[34mHello \x1b[35mWorld\x1b[0m!\n") -} - -func TestStdoutSequencesOff(t *testing.T) { - err := EnableVirtualTerminalProcessing(syscall.Stdout, false) - if err != nil { - t.Fatalf("Failed to enable VTP: %v", err) - } - - fmt.Fprintf(os.Stdout, "\x1b[34mHello \x1b[35mWorld\x1b[0m!\n") -} - -func TestStderrSequencesOn(t *testing.T) { - err := EnableVirtualTerminalProcessing(syscall.Stderr, true) - if err != nil { - t.Fatalf("Failed to enable VTP: %v", err) - } - defer EnableVirtualTerminalProcessing(syscall.Stderr, false) - - fmt.Fprintf(os.Stderr, "\x1b[34mHello \x1b[35mWorld\x1b[0m!\n") -} - -func TestStderrSequencesOff(t *testing.T) { - err := EnableVirtualTerminalProcessing(syscall.Stderr, false) - if err != nil { - t.Fatalf("Failed to enable VTP: %v", err) - } - - fmt.Fprintf(os.Stderr, "\x1b[34mHello \x1b[35mWorld\x1b[0m!\n") -} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go b/vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go deleted file mode 100644 index e679254e..00000000 --- a/vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package kazoo - -import ( - "fmt" - "net" - "os" - "strings" - "testing" - "time" -) - -var ( - // By default, assume we're using Sarama's vagrant cluster when running tests - zookeeperPeers []string = []string{"192.168.100.67:2181", "192.168.100.67:2182", "192.168.100.67:2183", "192.168.100.67:2184", "192.168.100.67:2185"} -) - -func init() { - if zookeeperPeersEnv := os.Getenv("ZOOKEEPER_PEERS"); zookeeperPeersEnv != "" { - zookeeperPeers = strings.Split(zookeeperPeersEnv, ",") - } - - fmt.Printf("Using Zookeeper cluster at %v\n", zookeeperPeers) -} - -func TestBrokers(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - brokers, err := kz.Brokers() - if err != nil { - t.Fatal(err) - } - - if len(brokers) == 0 { 
- t.Error("Expected at least one broker") - } - - for id, addr := range brokers { - if conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond); err != nil { - t.Errorf("Failed to connect to Kafka broker %d at %s", id, addr) - } else { - _ = conn.Close() - } - } - - assertSuccessfulClose(t, kz) -} - -func TestBrokerList(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - brokers, err := kz.BrokerList() - if err != nil { - t.Fatal(err) - } - - if len(brokers) == 0 { - t.Error("Expected at least one broker") - } - - for _, addr := range brokers { - if conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond); err != nil { - t.Errorf("Failed to connect to Kafka broker at %s", addr) - } else { - _ = conn.Close() - } - } - - assertSuccessfulClose(t, kz) -} - -func TestController(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - brokers, err := kz.Brokers() - if err != nil { - t.Fatal(err) - } - - controller, err := kz.Controller() - if err != nil { - t.Fatal(err) - } - - if _, ok := brokers[controller]; !ok { - t.Error("Expected the controller's BrokerID to be an existing one") - } - - assertSuccessfulClose(t, kz) -} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go b/vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go deleted file mode 100644 index 49aa7156..00000000 --- a/vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go +++ /dev/null @@ -1,658 +0,0 @@ -package kazoo - -import ( - "reflect" - "sync" - "testing" - "time" - - "github.com/samuel/go-zookeeper/zk" -) - -func TestConsumergroups(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroups") - - cgs, err := kz.Consumergroups() - if err != nil { - t.Error(err) - } - originalCount := len(cgs) - - if cg 
:= cgs.Find(cg.Name); cg != nil { - t.Error("Consumergoup `test.kazoo.TestConsumergroups` should not be found") - } - - if exists, _ := cg.Exists(); exists { - t.Error("Consumergoup `test.kazoo.TestConsumergroups` should not be registered yet") - } - - if err := cg.Create(); err != nil { - t.Error(err) - } - - if exists, _ := cg.Exists(); !exists { - t.Error("Consumergoup `test.kazoo.TestConsumergroups` should be registered now") - } - - cgs, err = kz.Consumergroups() - if err != nil { - t.Error(err) - } - - if len(cgs) != originalCount+1 { - t.Error("Should have one more consumergroup than at the start") - } - - if err := cg.Delete(); err != nil { - t.Error(err) - } - - if exists, _ := cg.Exists(); exists { - t.Error("Consumergoup `test.kazoo.TestConsumergroups` should not be registered anymore") - } - - cgs, err = kz.Consumergroups() - if err != nil { - t.Error(err) - } - - if len(cgs) != originalCount { - t.Error("Should have the original number of consumergroups again") - } -} - -func TestConsumergroupInstances(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstances") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - if instances, err := cg.Instances(); err != nil { - t.Error(err) - } else if len(instances) != 0 { - t.Fatal("Expected no active consumergroup instances") - } - - instance1 := cg.NewInstance() - // Make sure that the instance is unregistered. 
- if reg, err := instance1.Registration(); err != zk.ErrNoNode || reg != nil { - t.Errorf("Expected no registration: reg=%v, err=(%v)", reg, err) - } - - // Register a new instance - if instance1.ID == "" { - t.Error("It should generate a valid instance ID") - } - if err := instance1.Register([]string{"topic"}); err != nil { - t.Error(err) - } - - // Verify registration - reg, err := instance1.Registration() - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(reg.Subscription, map[string]int{"topic": 1}) { - t.Errorf("Unexpected registration: %v", reg) - } - - // Try to register an instance with the same ID. - if err := cg.Instance(instance1.ID).Register([]string{"topic"}); err != ErrInstanceAlreadyRegistered { - t.Error("The instance should already be registered") - } - - instance2 := cg.Instance("test") - if err := instance2.Register([]string{"topic"}); err != nil { - t.Error(err) - } - - time.Sleep(50 * time.Millisecond) - - if instances, err := cg.Instances(); err != nil { - t.Error(err) - } else { - if len(instances) != 2 { - t.Error("Expected 2 active consumergroup instances") - } - if i := instances.Find(instance1.ID); i == nil { - t.Error("Expected instance1 to be registered.") - } - if i := instances.Find(instance2.ID); i == nil { - t.Error("Expected instance2 to be registered.") - } - } - - // Deregister the two running instances - if err := instance1.Deregister(); err != nil { - t.Error(err) - } - if err := instance2.Deregister(); err != nil { - t.Error(err) - } - - // Try to deregister an instance that was not register - instance3 := cg.NewInstance() - if err := instance3.Deregister(); err != ErrInstanceNotRegistered { - t.Error("Expected new instance to not be registered") - } - - if instances, err := cg.Instances(); err != nil { - t.Error(err) - } else if len(instances) != 0 { - t.Error("Expected no active consumergroup instances") - } -} - -func TestConsumergroupInstanceCrash(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if 
err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstancesEphemeral") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - // Create a kazoo instance on which we will simulate a crash. - config := NewConfig() - config.Timeout = 50 * time.Millisecond - crashingKazoo, err := NewKazoo(zookeeperPeers, config) - if err != nil { - t.Fatal(err) - } - crashingCG := crashingKazoo.Consumergroup(cg.Name) - - // Instantiate and register the instance. - instance := crashingCG.NewInstance() - if err := instance.Register([]string{"test.1"}); err != nil { - t.Error(err) - } - - time.Sleep(50 * time.Millisecond) - if instances, err := cg.Instances(); err != nil { - t.Error(err) - } else if len(instances) != 1 { - t.Error("Should have 1 running instance, found", len(instances)) - } - - // Simulate a crash, and wait for Zookeeper to pick it up - _ = crashingKazoo.Close() - time.Sleep(200 * time.Millisecond) - - if instances, err := cg.Instances(); err != nil { - t.Error(err) - } else if len(instances) != 0 { - t.Error("Should have 0 running instances") - } -} - -func TestConsumergroupWatchInstances(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupWatchInstances") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - instances, c, err := cg.WatchInstances() - if err != nil { - t.Fatal(err) - } - - if len(instances) != 0 { - t.Error("Expected 0 running instances") - } - - instance := cg.NewInstance() - if err := instance.Register([]string{"topic"}); err != nil { - t.Fatal(err) - } - - // The instance watch should have been triggered - <-c - - instances, c, err = cg.WatchInstances() - if err != nil { - 
t.Fatal(err) - } - - if len(instances) != 1 { - t.Error("Expected 1 running instance") - } - - if err := instance.Deregister(); err != nil { - t.Fatal(err) - } - - // The instance watch should have been triggered again - <-c - - instances, err = cg.Instances() - if err != nil { - t.Fatal(err) - } - - if len(instances) != 0 { - t.Error("Expected 0 running instances") - } -} - -func TestConsumergroupInstanceClaimPartition(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstanceClaimPartition") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - // Create two instances for this consumergroup - - i1 := cg.NewInstance() - if err := i1.Register([]string{"test.4"}); err != nil { - t.Fatal(err) - } - i2 := cg.NewInstance() - if err := i2.Register([]string{"test.4"}); err != nil { - t.Fatal(err) - } - - // Claim all partitions divided by instance 1 and 2 - - if err := i1.ClaimPartition("test.4", 0); err != nil { - t.Error(err) - } - if err := i1.ClaimPartition("test.4", 1); err != nil { - t.Error(err) - } - if err := i2.ClaimPartition("test.4", 2); err != nil { - t.Error(err) - } - if err := i2.ClaimPartition("test.4", 3); err != nil { - t.Error(err) - } - - // Try to claim more partitions - if err := i1.ClaimPartition("test.4", 3); err != ErrPartitionClaimedByOther { - t.Error("Expected ErrPartitionClaimedByOther to be returned, found", err) - } - - if err := i2.ClaimPartition("test.4", 0); err != ErrPartitionClaimedByOther { - t.Error("Expected ErrPartitionClaimedByOther to be returned, found", err) - } - - // Instance 1: release some partitions - - if err := i1.ReleasePartition("test.4", 0); err != nil { - t.Error(err) - } - if err := i1.ReleasePartition("test.4", 1); err != nil { - t.Error(err) - } - - // Instance 2: claim the released 
partitions - - if err := i2.ClaimPartition("test.4", 0); err != nil { - t.Error(err) - } - if err := i2.ClaimPartition("test.4", 1); err != nil { - t.Error(err) - } - - // Instance 2: release all partitions - - if err := i2.ReleasePartition("test.4", 0); err != nil { - t.Error(err) - } - if err := i2.ReleasePartition("test.4", 1); err != nil { - t.Error(err) - } - if err := i2.ReleasePartition("test.4", 2); err != nil { - t.Error(err) - } - if err := i2.ReleasePartition("test.4", 3); err != nil { - t.Error(err) - } - - if err := i1.Deregister(); err != nil { - t.Error(err) - } - if err := i2.Deregister(); err != nil { - t.Error(err) - } -} - -func TestConsumergroupInstanceClaimPartitionSame(t *testing.T) { - // Given - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstanceClaimPartition2") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - instance := cg.NewInstance() - if err := instance.Register([]string{"test.4"}); err != nil { - t.Fatal(err) - } - - if err := instance.ClaimPartition("test.4", 0); err != nil { - t.Error(err) - } - - // When: claim the same partition again - err = instance.ClaimPartition("test.4", 0) - - // Then - if err != nil { - t.Error(err) - } - - // Cleanup - if err := instance.ReleasePartition("test.4", 0); err != nil { - t.Error(err) - } - if err := instance.Deregister(); err != nil { - t.Error(err) - } -} - -func TestConsumergroupInstanceWatchPartitionClaim(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstanceWatchPartitionClaim") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - instance1 := 
cg.NewInstance() - if err := instance1.Register([]string{"test.4"}); err != nil { - t.Fatal(err) - } - - // Assert the partition isn't claimed - instance, change, err := cg.WatchPartitionOwner("test.4", 0) - if err != nil { - t.Fatal(err) - } - if instance != nil { - t.Fatal("An unclaimed partition should not return an instance") - } - if change != nil { - t.Fatal("An unclaimed partition should not return a watch") - } - - // Now claim the partition - if err := instance1.ClaimPartition("test.4", 0); err != nil { - t.Fatal(err) - } - - // This time, we should get an insance back - instance, change, err = cg.WatchPartitionOwner("test.4", 0) - if err != nil { - t.Fatal(err) - } - - if instance.ID != instance1.ID { - t.Error("Our instance should have claimed the partition") - } - - go func() { - time.Sleep(100 * time.Millisecond) - if err := instance1.ReleasePartition("test.4", 0); err != nil { - t.Fatal(err) - } - }() - - // Wait for the zookeeper watch to trigger - <-change - - // Ensure the partition is no longer claimed - instance, err = cg.PartitionOwner("test.4", 0) - if err != nil { - t.Fatal(err) - } - if instance != nil { - t.Error("The partition should have been release by now") - } - - // Cleanup - if err := instance1.Deregister(); err != nil { - t.Error(err) - } -} - -func TestConsumergroupOffsets(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupOffsets") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - offset, err := cg.FetchOffset("test", 0) - if err != nil { - t.Error(err) - } - - if offset >= 0 { - t.Error("Expected to get a negative offset for a partition that hasn't seen an offset commit yet") - } - - if err := cg.CommitOffset("test", 0, 1234); err != nil { - t.Error(err) - } - - offset, err = cg.FetchOffset("test", 0) - if 
err != nil { - t.Error(err) - } - if offset != 1234 { - t.Error("Expected to get the offset that was committed.") - } -} - -func TestConsumergroupResetOffsetsRace(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupResetOffsetsRace") - if err := cg.Create(); err != nil { - t.Fatal(err) - } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - offsets, err := cg.FetchAllOffsets() - if err != nil { - t.Error(err) - } - - if len(offsets) > 0 { - t.Errorf("A new consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) - } - - if err := cg.CommitOffset("test", 0, 1234); err != nil { - t.Error(err) - } - - if err := cg.CommitOffset("test", 1, 2345); err != nil { - t.Error(err) - } - - offsets, err = cg.FetchAllOffsets() - if err != nil { - t.Error(err) - } - - if offsets["test"][0] == 1234 && offsets["test"][1] == 2345 { - t.Log("All offsets present in offset map") - } else { - t.Logf("Offset map not as expected: %v", offsets) - } - - cg2 := kz.Consumergroup("test.kazoo.TestConsumergroupResetOffsetsRace") - - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - if err := cg2.ResetOffsets(); err != nil { - t.Fatal(err) - } - }() - go func() { - defer wg.Done() - if err := cg.ResetOffsets(); err != nil { - t.Fatal(err) - } - }() - - wg.Wait() - - offsets, err = cg.FetchAllOffsets() - if err != nil { - t.Error(err) - } - - if len(offsets) > 0 { - t.Errorf("After a reset, consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) - } -} - -func TestConsumergroupResetOffsets(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - defer assertSuccessfulClose(t, kz) - - cg := kz.Consumergroup("test.kazoo.TestConsumergroupResetOffsets") - if err := cg.Create(); err != nil { - t.Fatal(err) 
- } - defer func() { - if err := cg.Delete(); err != nil { - t.Error(err) - } - }() - - offsets, err := cg.FetchAllOffsets() - if err != nil { - t.Error(err) - } - - if len(offsets) > 0 { - t.Errorf("A new consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) - } - - if err := cg.CommitOffset("test1", 0, 1234); err != nil { - t.Error(err) - } - - if err := cg.CommitOffset("test1", 1, 2345); err != nil { - t.Error(err) - } - - if err := cg.CommitOffset("test2", 0, 3456); err != nil { - t.Error(err) - } - - offsets, err = cg.FetchAllOffsets() - if err != nil { - t.Error(err) - } - - if offsets["test1"][0] == 1234 && offsets["test1"][1] == 2345 && offsets["test2"][0] == 3456 { - t.Log("All offsets present in offset map") - } else { - t.Logf("Offset map not as expected: %v", offsets) - } - - if err := cg.ResetOffsets(); err != nil { - t.Fatal(err) - } - - offsets, err = cg.FetchAllOffsets() - if err != nil { - t.Error(err) - } - - if len(offsets) > 0 { - t.Errorf("After a reset, consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) - } -} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go b/vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go deleted file mode 100644 index 9313781f..00000000 --- a/vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package kazoo - -import ( - "reflect" - "testing" - "time" -) - -func TestCreateDeleteTopic(t *testing.T) { - tests := []struct { - name string - partitionCount int - config map[string]string - err error - }{ - {"test.admin.1", 1, nil, nil}, - {"test.admin.1", 1, nil, ErrTopicExists}, - {"test.admin.2", 1, map[string]string{}, nil}, - {"test.admin.3", 4, map[string]string{"retention.ms": "604800000"}, nil}, - {"test.admin.3", 3, nil, ErrTopicExists}, - {"test.admin.4", 12, map[string]string{"retention.bytes": "1000000000", "retention.ms": "9999999"}, 
nil}, - } - - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - for testIdx, test := range tests { - err = kz.CreateTopic(test.name, test.partitionCount, 1, test.config) - if err != test.err { - t.Errorf("Unexpected error (%v) creating %s for test %d", err, test.name, testIdx) - continue - } - if err == nil { - topic := kz.Topic(test.name) - conf, err := topic.Config() - if err != nil { - t.Errorf("Unable to get topic config (%v) for %s for test %d", err, test.name, testIdx) - } - // allow for nil == empty map - if !reflect.DeepEqual(conf, test.config) && !(test.config == nil && len(conf) == 0) { - t.Errorf("Invalid config for %s in test %d. Expected (%v) got (%v)", test.name, testIdx, conf, test.config) - } - } - - } - - // delete all test topics - topicMap := make(map[string]bool) - for _, test := range tests { - // delete if we haven't seen the topic before - if _, ok := topicMap[test.name]; !ok { - err := kz.DeleteTopic(test.name) - if err != nil { - t.Errorf("Unable to delete topic %s (%v)", test.name, err) - } - } - topicMap[test.name] = true - } - - totalToDelete := len(topicMap) - - // wait for deletion (up to 60s) - for i := 0; i < 15; i++ { - for name := range topicMap { - topic := &Topic{kz: kz, Name: name} - if exists, _ := topic.Exists(); !exists { - delete(topicMap, name) - } - } - // all topics deleted - if len(topicMap) == 0 { - break - } - time.Sleep(1 * time.Second) - } - - if len(topicMap) != 0 { - t.Errorf("Unable to delete all topics %d out of %d remaining after 15 seconds", len(topicMap), totalToDelete) - } -} - -func TestDeleteTopicSync(t *testing.T) { - - kz, err := NewKazoo(zookeeperPeers, nil) - - topicName := "test.admin.1" - - if err != nil { - t.Fatal(err) - } - - err = kz.CreateTopic(topicName, 1, 1, nil) - - if err != nil { - t.Errorf("Unexpected error (%v) creating topic %s", err, topicName) - } - - topic := kz.Topic("test.admin.1") - _, err = topic.Config() - - if err != nil { - t.Errorf("Unable to 
get topic config (%v) for %s", err, topicName) - } - - // delete the topic synchronously - err = kz.DeleteTopicSync(topicName, 0) - - if err != nil { - t.Errorf("Unexpected error (%v) while deleting topic synchronously", err) - } - - exists, err := topic.Exists() - - if err != nil { - t.Errorf("Unexpected error (%v) while checking if topic exists", err) - } - - if exists { - t.Error("Deleted topic still exists.") - } -} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go b/vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go deleted file mode 100644 index 34c8f42b..00000000 --- a/vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package kazoo - -import ( - "testing" -) - -func TestTopics(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - topics, err := kz.Topics() - if err != nil { - t.Error(err) - } - - existingTopic := topics.Find("test.4") - if existingTopic == nil { - t.Error("Expected topic test.4 to be returned") - } else if existingTopic.Name != "test.4" { - t.Error("Expected topic test.4 to have its name set") - } - - nonexistingTopic := topics.Find("__nonexistent__") - if nonexistingTopic != nil { - t.Error("Expected __nonexistent__ topic to not be defined") - } - - assertSuccessfulClose(t, kz) -} - -func TestTopicPartitions(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - partitions, err := kz.Topic("test.4").Partitions() - if err != nil { - t.Fatal(err) - } - - if len(partitions) != 4 { - t.Errorf("Expected test.4 to have 4 partitions") - } - - brokers, err := kz.Brokers() - if err != nil { - t.Fatal(err) - } - - for index, partition := range partitions { - if partition.ID != int32(index) { - t.Error("partition.ID is not set properly") - } - - leader, err := partition.Leader() - if err != nil { - t.Fatal(err) - } - - if _, ok := brokers[leader]; !ok { 
- t.Errorf("Expected the leader of test.4/%d to be an existing broker.", partition.ID) - } - - isr, err := partition.ISR() - if err != nil { - t.Fatal(err) - } - - for _, brokerID := range isr { - if _, ok := brokers[brokerID]; !ok { - t.Errorf("Expected all ISRs of test.4/%d to be existing brokers.", partition.ID) - } - } - } - - assertSuccessfulClose(t, kz) -} - -func TestTopicConfig(t *testing.T) { - kz, err := NewKazoo(zookeeperPeers, nil) - if err != nil { - t.Fatal(err) - } - - topicConfig, err := kz.Topic("test.4").Config() - if err != nil { - t.Error(err) - } - if topicConfig["retention.ms"] != "604800000" { - t.Error("Expected retention.ms config for test.4 to be set to 604800000") - } - - topicConfig, err = kz.Topic("test.1").Config() - if err != nil { - t.Error(err) - } - if len(topicConfig) > 0 { - t.Error("Expected no topic level configuration to be set for test.1") - } - - assertSuccessfulClose(t, kz) -} - -func assertSuccessfulClose(t *testing.T, kz *Kazoo) { - if err := kz.Close(); err != nil { - t.Error(err) - } -} diff --git a/vendor/github.com/krallistic/kazoo-go/kazoo_test.go b/vendor/github.com/krallistic/kazoo-go/kazoo_test.go deleted file mode 100644 index c32ba0ea..00000000 --- a/vendor/github.com/krallistic/kazoo-go/kazoo_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package kazoo - -import ( - "testing" -) - -func TestBuildConnectionString(t *testing.T) { - nodes := []string{"zk1:2181", "zk2:2181", "zk3:2181"} - - if str := BuildConnectionString(nodes); str != "zk1:2181,zk2:2181,zk3:2181" { - t.Errorf("The connection string was not built correctly: %s", str) - } - - if str := BuildConnectionStringWithChroot(nodes, "/chroot"); str != "zk1:2181,zk2:2181,zk3:2181/chroot" { - t.Errorf("The connection string was not built correctly: %s", str) - } -} - -func TestParseConnectionString(t *testing.T) { - var ( - nodes []string - chroot string - ) - - nodes, chroot = ParseConnectionString("zookeeper/chroot") - if len(nodes) != 1 || nodes[0] != "zookeeper" 
{ - t.Error("Parsed nodes incorrectly:", nodes) - } - if chroot != "/chroot" { - t.Error("Parsed chroot incorrectly:", chroot) - } - - nodes, chroot = ParseConnectionString("zk1:2181,zk2:2181,zk3:2181") - if len(nodes) != 3 || nodes[0] != "zk1:2181" || nodes[1] != "zk2:2181" || nodes[2] != "zk3:2181" { - t.Error("Parsed nodes incorrectly:", nodes) - } - if chroot != "" { - t.Error("Parsed chroot incorrectly:", chroot) - } - - nodes, chroot = ParseConnectionString("zk1:2181,zk2/nested/chroot") - if len(nodes) != 2 || nodes[0] != "zk1:2181" || nodes[1] != "zk2" { - t.Error("Parsed nodes incorrectly:", nodes) - } - if chroot != "/nested/chroot" { - t.Error("Parsed chroot incorrectly:", chroot) - } - - nodes, chroot = ParseConnectionString("") - if len(nodes) != 1 || nodes[0] != "" { - t.Error("Parsed nodes incorrectly:", nodes) - } - if chroot != "" { - t.Error("Parsed chroot incorrectly:", chroot) - } -} diff --git a/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore b/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore deleted file mode 100644 index c93d3c5b..00000000 --- a/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-topics -kafka-topics.test diff --git a/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go b/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go deleted file mode 100644 index 2262f8de..00000000 --- a/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go +++ /dev/null @@ -1,93 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "os" - "sort" - "sync" - "time" - - "github.com/wvanbergen/kazoo-go" -) - -var ( - zookeeper = flag.String("zookeeper", os.Getenv("ZOOKEEPER_PEERS"), "Zookeeper connection string. 
It can include a chroot.") - zookeeperTimeout = flag.Int("zookeeper-timeout", 1000, "Zookeeper timeout in milliseconds.") -) - -func main() { - flag.Parse() - - if *zookeeper == "" { - printUsageErrorAndExit("You have to provide a zookeeper connection string using -zookeeper, or the ZOOKEEPER_PEERS environment variable") - } - - conf := kazoo.NewConfig() - conf.Timeout = time.Duration(*zookeeperTimeout) * time.Millisecond - - kz, err := kazoo.NewKazooFromConnectionString(*zookeeper, conf) - if err != nil { - printErrorAndExit(69, "Failed to connect to Zookeeper: %v", err) - } - defer func() { _ = kz.Close() }() - - topics, err := kz.Topics() - if err != nil { - printErrorAndExit(69, "Failed to get Kafka topics from Zookeeper: %v", err) - } - sort.Sort(topics) - - var ( - wg sync.WaitGroup - l sync.Mutex - stdout = make([]string, len(topics)) - ) - - for i, topic := range topics { - wg.Add(1) - go func(i int, topic *kazoo.Topic) { - defer wg.Done() - - buffer := bytes.NewBuffer(make([]byte, 0)) - - partitions, err := topic.Partitions() - if err != nil { - printErrorAndExit(69, "Failed to get Kafka topic partitions from Zookeeper: %v", err) - } - - fmt.Fprintf(buffer, "Topic: %s\tPartitions: %d\n", topic.Name, len(partitions)) - - for _, partition := range partitions { - leader, _ := partition.Leader() - isr, _ := partition.ISR() - - fmt.Fprintf(buffer, "\tPartition: %d\tReplicas: %v\tLeader: %d\tISR: %v\n", partition.ID, partition.Replicas, leader, isr) - } - - l.Lock() - stdout[i] = buffer.String() - l.Unlock() - }(i, topic) - } - - wg.Wait() - for _, msg := range stdout { - fmt.Print(msg) - } -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, 
"ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} diff --git a/vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go b/vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go deleted file mode 100644 index 72a3f00c..00000000 --- a/vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package kazoo - -import ( - "sort" - "testing" -) - -func TestPartition(t *testing.T) { - topic := &Topic{Name: "test"} - partition := topic.Partition(1, []int32{1, 2, 3}) - - if key := partition.Key(); key != "test/1" { - t.Error("Unexpected partition key", key) - } - - if partition.Topic() != topic { - t.Error("Expected Topic() to return the topic the partition was created from.") - } - - if pr := partition.PreferredReplica(); pr != 1 { - t.Error("Expected 1 to be the preferred replica, but found", pr) - } - - partitionWithoutReplicas := topic.Partition(1, nil) - if pr := partitionWithoutReplicas.PreferredReplica(); pr != -1 { - t.Error("Expected -1 to be returned if the partition does not have replicas, but found", pr) - } -} - -func TestTopicList(t *testing.T) { - topics := TopicList{ - &Topic{Name: "foo"}, - &Topic{Name: "bar"}, - &Topic{Name: "baz"}, - } - - sort.Sort(topics) - - if topics[0].Name != "bar" || topics[1].Name != "baz" || topics[2].Name != "foo" { - t.Error("Unexpected order after sorting topic list", topics) - } - - topic := topics.Find("foo") - if topic != topics[2] { - t.Error("Should have found foo topic from the list") - } -} - -func TestPartitionList(t *testing.T) { - var ( - topic1 = &Topic{Name: "1"} - topic2 = &Topic{Name: "2"} - ) - - var ( - partition21 = topic2.Partition(1, nil) - partition12 = topic1.Partition(2, nil) - partition11 = topic1.Partition(1, nil) - ) - - partitions := PartitionList{partition21, partition12, partition11} - sort.Sort(partitions) - - if partitions[0] != partition11 || partitions[1] != partition12 || partitions[2] != partition21 { - 
t.Error("Unexpected order after sorting topic list", partitions) - } -} - -func TestGeneratePartitionAssignments(t *testing.T) { - // check for errors - tests := []struct { - brokers []int32 - partitionCount int - replicationFactor int - err error - }{ - {[]int32{1, 2}, -1, 1, ErrInvalidPartitionCount}, - {[]int32{1, 2}, 0, 1, ErrInvalidPartitionCount}, - {[]int32{}, 1, 1, ErrInvalidReplicationFactor}, - {[]int32{1, 2}, 1, -1, ErrInvalidReplicationFactor}, - {[]int32{1, 2}, 2, 0, ErrInvalidReplicationFactor}, - {[]int32{1, 2}, 3, 3, ErrInvalidReplicationFactor}, - {[]int32{1, 2}, 2, 1, nil}, - {[]int32{1, 2}, 10, 2, nil}, - {[]int32{1}, 10, 1, nil}, - {[]int32{1, 2, 3, 4, 5}, 1, 1, nil}, - {[]int32{1, 2, 3, 4, 5}, 1, 3, nil}, - {[]int32{1, 2, 3, 4, 5}, 10, 2, nil}, - } - - for testIdx, test := range tests { - topic := &Topic{Name: "t"} - - res, err := topic.generatePartitionAssignments(test.brokers, test.partitionCount, test.replicationFactor) - if err != test.err { - t.Errorf("Incorrect error for test %d. Expected (%v) got (%v)", testIdx, test.err, err) - } else if err == nil { - // proper number of paritions - if len(res) != test.partitionCount { - t.Errorf("Wrong number of partitions assigned in test %d. Expected %d got %d", testIdx, test.partitionCount, len(res)) - } - // ensure all petitions are assigned and that they have - // the right number of non-overlapping brokers - for i, part := range res { - if part == nil { - t.Errorf("Partition %d is nil in test %d", i, testIdx) - continue - } - if len(part.Replicas) != test.replicationFactor { - t.Errorf("Partition %d does not have the correct number of brokers in test %d. 
Expected %d got %d", i, testIdx, test.replicationFactor, len(part.Replicas)) - } - replicaMap := make(map[int32]bool, test.replicationFactor) - for _, r := range part.Replicas { - // ensure broker is in initial broker list - found := false - for _, broker := range test.brokers { - if broker == r { - found = true - break - } - } - if !found { - t.Errorf("Partition %d has an invalid broker id %d in test %d", i, r, testIdx) - } - replicaMap[r] = true - } - if len(replicaMap) != len(part.Replicas) { - t.Errorf("Partition %d has overlapping broker assignments (%v) in test %d", i, part.Replicas, testIdx) - } - } - } - } -} - -func TestValidatePartitionAssignments(t *testing.T) { - // check for errors - tests := []struct { - brokers []int32 - partitions PartitionList - err error - }{ - {[]int32{1}, PartitionList{}, ErrInvalidPartitionCount}, - - {[]int32{1}, PartitionList{ - {ID: 0, Replicas: []int32{}}, - }, ErrInvalidReplicationFactor}, - - {[]int32{1, 2}, PartitionList{ - {ID: 0, Replicas: []int32{1}}, - {ID: 1, Replicas: []int32{1, 2}}, - }, ErrInvalidReplicaCount}, - - {[]int32{1, 2}, PartitionList{ - {ID: 0, Replicas: []int32{1, 2}}, - {ID: 1, Replicas: []int32{1}}, - }, ErrInvalidReplicaCount}, - - {[]int32{1, 2}, PartitionList{ - {ID: 0, Replicas: []int32{1, 2}}, - {ID: 1, Replicas: []int32{2, 2}}, - }, ErrReplicaBrokerOverlap}, - - {[]int32{1, 2}, PartitionList{ - {ID: 0, Replicas: []int32{1, 3}}, - {ID: 1, Replicas: []int32{2, 1}}, - }, ErrInvalidBroker}, - - {[]int32{1, 2, 3}, PartitionList{ - {ID: 1, Replicas: []int32{1, 3}}, - {ID: 2, Replicas: []int32{2, 1}}, - }, ErrMissingPartitionID}, - - {[]int32{1, 2, 3}, PartitionList{ - {ID: 0, Replicas: []int32{1, 3}}, - {ID: 2, Replicas: []int32{2, 1}}, - }, ErrMissingPartitionID}, - - {[]int32{1, 2, 3}, PartitionList{ - {ID: 0, Replicas: []int32{1, 3}}, - {ID: 0, Replicas: []int32{1, 3}}, - {ID: 2, Replicas: []int32{2, 1}}, - }, ErrDuplicatePartitionID}, - - {[]int32{1}, PartitionList{ - {ID: 0, Replicas: 
[]int32{1}}, - }, nil}, - - {[]int32{1}, PartitionList{ - {ID: 0, Replicas: []int32{1}}, - {ID: 1, Replicas: []int32{1}}, - {ID: 2, Replicas: []int32{1}}, - }, nil}, - - {[]int32{1, 2, 3}, PartitionList{ - {ID: 0, Replicas: []int32{1, 2}}, - {ID: 1, Replicas: []int32{2, 3}}, - {ID: 2, Replicas: []int32{3, 1}}, - }, nil}, - } - - for testIdx, test := range tests { - topic := &Topic{Name: "t"} - - err := topic.validatePartitionAssignments(test.brokers, test.partitions) - if err != test.err { - t.Errorf("Incorrect error for test %d. Expected (%v) got (%v)", testIdx, test.err, err) - } else if err == nil { - } - } -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml deleted file mode 100644 index 5db25803..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.5 - - 1.6 - - tip - -script: make -f Makefile.TRAVIS diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS b/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS deleted file mode 100644 index 24f9649e..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS +++ /dev/null @@ -1,15 +0,0 @@ -all: build cover test vet - -build: - go build -v ./... - -cover: test - $(MAKE) -C pbutil cover - -test: build - go test -v ./... - -vet: build - go vet -v ./... - -.PHONY: build cover test vet diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md deleted file mode 100644 index 751ee696..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Overview -This repository provides various Protocol Buffer extensions for the Go -language (golang), namely support for record length-delimited message -streaming. 
- -| Java | Go | -| ------------------------------ | --------------------- | -| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | -| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | - -Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is -destined to never be merged into mainline (i.e., never be promoted to formal -[goprotobuf features](https://github.com/golang/protobuf)), this repository -will live here in the wild. - -# Documentation -We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. - -# Testing -[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/ext/moved.go b/vendor/github.com/matttproud/golang_protobuf_extensions/ext/moved.go deleted file mode 100644 index f31a0f04..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/ext/moved.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package ext moved to a new location: github.com/matttproud/golang_protobuf_extensions/pbutil. -package ext diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go deleted file mode 100644 index 73efcb18..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package pbtest is deleted for the time being, because upstream Protocol Buffer 3 may have rendered quick.Value-based blackbox generation impossible. 
-package pbtest diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go deleted file mode 100644 index a793c885..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "bytes" - "testing" - - "github.com/golang/protobuf/proto" - - . "github.com/matttproud/golang_protobuf_extensions/testdata" -) - -func TestWriteDelimited(t *testing.T) { - t.Parallel() - for _, test := range []struct { - msg proto.Message - buf []byte - n int - err error - }{ - { - msg: &Empty{}, - n: 1, - buf: []byte{0}, - }, - { - msg: &GoEnum{Foo: FOO_FOO1.Enum()}, - n: 3, - buf: []byte{2, 8, 1}, - }, - { - msg: &Strings{ - StringField: proto.String(`This is my gigantic, unhappy string. It exceeds -the encoding size of a single byte varint. We are using it to fuzz test the -correctness of the header decoding mechanisms, which may prove problematic. -I expect it may. 
Let's hope you enjoy testing as much as we do.`), - }, - n: 271, - buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, - 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, - 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, - 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, - 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, - 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, - 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, - 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, - 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, - 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, - 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, - 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, - 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, - 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, - 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, - 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, - 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, - 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, - }, - } { - var buf bytes.Buffer - if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err { - t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err) - } - if out := buf.Bytes(); !bytes.Equal(out, test.buf) { - t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf) - } - } -} - -func TestReadDelimited(t *testing.T) { - t.Parallel() - for _, test := range []struct { - buf []byte - msg proto.Message - n int - err error - }{ - { - buf: []byte{0}, - msg: &Empty{}, - n: 1, - }, - { - n: 3, - buf: []byte{2, 8, 1}, - msg: &GoEnum{Foo: 
FOO_FOO1.Enum()}, - }, - { - buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, - 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, - 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, - 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, - 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, - 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, - 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, - 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, - 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, - 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, - 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, - 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, - 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, - 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, - 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, - 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, - 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, - 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, - msg: &Strings{ - StringField: proto.String(`This is my gigantic, unhappy string. It exceeds -the encoding size of a single byte varint. We are using it to fuzz test the -correctness of the header decoding mechanisms, which may prove problematic. -I expect it may. 
Let's hope you enjoy testing as much as we do.`), - }, - n: 271, - }, - } { - msg := proto.Clone(test.msg) - msg.Reset() - if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err { - t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err) - } - if !proto.Equal(msg, test.msg) { - t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg) - } - } -} - -func TestEndToEndValid(t *testing.T) { - t.Parallel() - for _, test := range [][]proto.Message{ - {&Empty{}}, - {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, - {&GoEnum{Foo: FOO_FOO1.Enum()}}, - {&Strings{ - StringField: proto.String(`This is my gigantic, unhappy string. It exceeds -the encoding size of a single byte varint. We are using it to fuzz test the -correctness of the header decoding mechanisms, which may prove problematic. -I expect it may. Let's hope you enjoy testing as much as we do.`), - }}, - } { - var buf bytes.Buffer - var written int - for i, msg := range test { - n, err := WriteDelimited(&buf, msg) - if err != nil { - // Assumption: TestReadDelimited and TestWriteDelimited are sufficient - // and inputs for this test are explicitly exercised there. - t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err) - } - written += n - } - var read int - for i, msg := range test { - out := proto.Clone(msg) - out.Reset() - n, _ := ReadDelimited(&buf, out) - // Decide to do EOF checking? 
- read += n - if !proto.Equal(out, msg) { - t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg) - } - } - if read != written { - t.Fatalf("%v read = %d; want %d", test, read, written) - } - } -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go deleted file mode 100644 index 364a7b79..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2016 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "bytes" - "io" - "testing" - "testing/iotest" -) - -func TestReadDelimitedIllegalVarint(t *testing.T) { - t.Parallel() - var tests = []struct { - in []byte - n int - err error - }{ - { - in: []byte{255, 255, 255, 255, 255}, - n: 5, - err: errInvalidVarint, - }, - { - in: []byte{255, 255, 255, 255, 255, 255}, - n: 5, - err: errInvalidVarint, - }, - } - for _, test := range tests { - n, err := ReadDelimited(bytes.NewReader(test.in), nil) - if got, want := n, test.n; got != want { - t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", test.in, got, want) - } - if got, want := err, test.err; got != want { - t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", test.in, got, want) - } - } -} - -func TestReadDelimitedPrematureHeader(t *testing.T) { - t.Parallel() - var data = []byte{128, 5} // 256 + 256 + 128 - n, err := ReadDelimited(bytes.NewReader(data[0:1]), nil) - if got, want := n, 1; got != want { - t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) - } - if got, want := err, io.EOF; got != want { - t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) - } -} - -func TestReadDelimitedPrematureBody(t *testing.T) { - t.Parallel() - var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128 - n, err := ReadDelimited(bytes.NewReader(data[:]), nil) - if got, want := n, 5; got != want { - t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) - } - if got, want := err, io.ErrUnexpectedEOF; got != want { - t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) - } -} - -func TestReadDelimitedPrematureHeaderIncremental(t *testing.T) { - t.Parallel() - var data = []byte{128, 5} // 256 + 256 + 128 - n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil) - if got, want := n, 1; got != want { - t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) - } - if got, want := err, io.EOF; 
got != want { - t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) - } -} - -func TestReadDelimitedPrematureBodyIncremental(t *testing.T) { - t.Parallel() - var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128 - n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil) - if got, want := n, 5; got != want { - t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) - } - if got, want := err, io.ErrUnexpectedEOF; got != want { - t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) - } -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go deleted file mode 100644 index f92632b0..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2016 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "bytes" - "errors" - "testing" - - "github.com/golang/protobuf/proto" -) - -var errMarshal = errors.New("pbutil: can't marshal") - -type cantMarshal struct{ proto.Message } - -func (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal } - -var _ proto.Message = cantMarshal{} - -func TestWriteDelimitedMarshalErr(t *testing.T) { - t.Parallel() - var data cantMarshal - var buf bytes.Buffer - n, err := WriteDelimited(&buf, data) - if got, want := n, 0; got != want { - t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) - } - if got, want := err, errMarshal; got != want { - t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) - } -} - -type canMarshal struct{ proto.Message } - -func (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil } - -var errWrite = errors.New("pbutil: can't write") - -type cantWrite struct{} - -func (cantWrite) Write([]byte) (int, error) { return 0, errWrite } - -func TestWriteDelimitedWriteErr(t *testing.T) { - t.Parallel() - var data canMarshal - var buf cantWrite - n, err := WriteDelimited(buf, data) - if got, want := n, 0; got != want { - t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) - } - if got, want := err, errWrite; got != want { - t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) - } -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY deleted file mode 100644 index 0c1f8424..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY +++ /dev/null @@ -1,4 +0,0 @@ -test.pb.go and test.proto are third-party data. 
- -SOURCE: https://github.com/golang/protobuf -REVISION: bf531ff1a004f24ee53329dfd5ce0b41bfdc17df diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go deleted file mode 100644 index 772adcb6..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go +++ /dev/null @@ -1,4029 +0,0 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - RequiredInnerMessage - MyMessage - Ext - ComplexExtension - DefaultsMessage - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap - Oneof - Communique -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.ProtoPackageIsVersion1 - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} -func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -// An enum, for completeness. -type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = 
GoTest_KIND(value) - return nil -} -func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} -func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } - -type DefaultsMessage_DefaultsEnum int32 - -const ( - DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 - DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 - DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 -) - -var DefaultsMessage_DefaultsEnum_name = map[int32]string{ - 0: "ZERO", - 1: "ONE", - 2: "TWO", -} -var DefaultsMessage_DefaultsEnum_value = map[string]int32{ - "ZERO": 0, - "ONE": 1, - "TWO": 2, -} - -func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { - p := new(DefaultsMessage_DefaultsEnum) - *p = x - return p -} -func (x DefaultsMessage_DefaultsEnum) String() string { - return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) -} -func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") - if err != nil { - return err - } - *x = DefaultsMessage_DefaultsEnum(value) - return nil -} -func 
(DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{15, 0} -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} -func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{20, 0} } - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} -func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{22, 0} } - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return 
proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} -func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req,name=Label,json=label" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req,name=Type,json=type" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} -func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,json=kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt,name=Table,json=table" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt,name=Param,json=param" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. 
- RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField,json=repeatedField" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField,json=optionalField" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=fBoolRequired" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=fInt32Required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=fInt64Required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=fFixed32Required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=fFixed64Required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=fUint32Required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=fUint64Required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=fFloatRequired" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=fDoubleRequired" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=fStringRequired" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=fBytesRequired" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=fSint32Required" json:"F_Sint32_required,omitempty"` - 
F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=fSint64Required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=fBoolRepeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=fInt32Repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=fInt64Repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=fFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=fFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=fUint32Repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=fUint64Repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=fFloatRepeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=fDoubleRepeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=fStringRepeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=fBytesRepeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=fSint32Repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=fSint64Repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool 
`protobuf:"varint,30,opt,name=F_Bool_optional,json=fBoolOptional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=fInt32Optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=fInt64Optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=fFixed32Optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=fFixed64Optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=fUint32Optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=fUint64Optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=fFloatOptional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=fDoubleOptional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=fStringOptional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=fBytesOptional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=fSint32Optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=fSint64Optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=fBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=fInt32Defaulted,def=32" 
json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=fInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=fFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=fFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=fUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=fUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=fFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=fDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=fStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=fBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=fSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=fSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). 
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=fBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=fInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=fInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=fFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=fFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=fUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=fUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=fFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=fDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=fSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=fSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` - Repeatedgroup 
[]*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} -func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil 
&& m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m 
*GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m *GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m 
*GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) 
GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
-} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - 
return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. -type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} -func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} -func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} -func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } - 
-func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} -func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` - GroupString *string 
`protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} -func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } - -func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} -func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} -func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. 
- LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} -func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} -func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} -func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. 
-type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. - Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} -func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} -func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) 
ProtoMessage() {} -func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} -func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -var extRange_OtherMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OtherMessage -} -func (m *OtherMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - 
return m.Inner - } - return nil -} - -type RequiredInnerMessage struct { - LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } -func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } -func (*RequiredInnerMessage) ProtoMessage() {} -func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { - if m != nil { - return m.LeoFinallyWonAnOscar - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` - WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. 
- RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} -func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { - if m != nil { - return m.WeMustGoDeeper - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return 
m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} -func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} -func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type ComplexExtension struct { - First *int32 `protobuf:"varint,1,opt,name=first" 
json:"first,omitempty"` - Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` - Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } -func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } -func (*ComplexExtension) ProtoMessage() {} -func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -func (m *ComplexExtension) GetFirst() int32 { - if m != nil && m.First != nil { - return *m.First - } - return 0 -} - -func (m *ComplexExtension) GetSecond() int32 { - if m != nil && m.Second != nil { - return *m.Second - } - return 0 -} - -func (m *ComplexExtension) GetThird() []int32 { - if m != nil { - return m.Third - } - return nil -} - -type DefaultsMessage struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} -func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -var extRange_DefaultsMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_DefaultsMessage -} -func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} -func (*MyMessageSet) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{16} } - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} -func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 
`protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} -func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} -func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. 
- F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,name=F_String,json=fString,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=fPinf,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=fNinf,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=fNan,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. 
- StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} -func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) 
GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) -} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 
`protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} -func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} -func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) 
ProtoMessage() {} -func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} -func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} -func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24, 0} } - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = 
GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} -func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} -func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} -func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" 
json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} -func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -type Oneof struct { - // Types that are valid to be assigned to Union: - // *Oneof_F_Bool - // *Oneof_F_Int32 - // *Oneof_F_Int64 - // *Oneof_F_Fixed32 - // *Oneof_F_Fixed64 - // *Oneof_F_Uint32 - // *Oneof_F_Uint64 - // *Oneof_F_Float - // *Oneof_F_Double - // *Oneof_F_String - // *Oneof_F_Bytes - // *Oneof_F_Sint32 - // *Oneof_F_Sint64 - // *Oneof_F_Enum - // *Oneof_F_Message - // *Oneof_FGroup - // *Oneof_F_Largest_Tag - Union isOneof_Union `protobuf_oneof:"union"` - // Types that are valid to be assigned to Tormato: - // *Oneof_Value - Tormato isOneof_Tormato `protobuf_oneof:"tormato"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Oneof) Reset() { *m = Oneof{} } -func (m *Oneof) String() string { return proto.CompactTextString(m) } -func (*Oneof) ProtoMessage() {} -func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -type isOneof_Union 
interface { - isOneof_Union() -} -type isOneof_Tormato interface { - isOneof_Tormato() -} - -type Oneof_F_Bool struct { - F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,oneof"` -} -type Oneof_F_Int32 struct { - F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,oneof"` -} -type Oneof_F_Int64 struct { - F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,oneof"` -} -type Oneof_F_Fixed32 struct { - F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,oneof"` -} -type Oneof_F_Fixed64 struct { - F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,oneof"` -} -type Oneof_F_Uint32 struct { - F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,oneof"` -} -type Oneof_F_Uint64 struct { - F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,oneof"` -} -type Oneof_F_Float struct { - F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,oneof"` -} -type Oneof_F_Double struct { - F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,oneof"` -} -type Oneof_F_String struct { - F_String string `protobuf:"bytes,10,opt,name=F_String,json=fString,oneof"` -} -type Oneof_F_Bytes struct { - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,oneof"` -} -type Oneof_F_Sint32 struct { - F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,oneof"` -} -type Oneof_F_Sint64 struct { - F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,oneof"` -} -type Oneof_F_Enum struct { - F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.MyMessage_Color,oneof"` -} -type Oneof_F_Message struct { - F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=fMessage,oneof"` -} -type Oneof_FGroup struct { - FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` -} -type Oneof_F_Largest_Tag struct { - F_Largest_Tag int32 
`protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=fLargestTag,oneof"` -} -type Oneof_Value struct { - Value int32 `protobuf:"varint,100,opt,name=value,oneof"` -} - -func (*Oneof_F_Bool) isOneof_Union() {} -func (*Oneof_F_Int32) isOneof_Union() {} -func (*Oneof_F_Int64) isOneof_Union() {} -func (*Oneof_F_Fixed32) isOneof_Union() {} -func (*Oneof_F_Fixed64) isOneof_Union() {} -func (*Oneof_F_Uint32) isOneof_Union() {} -func (*Oneof_F_Uint64) isOneof_Union() {} -func (*Oneof_F_Float) isOneof_Union() {} -func (*Oneof_F_Double) isOneof_Union() {} -func (*Oneof_F_String) isOneof_Union() {} -func (*Oneof_F_Bytes) isOneof_Union() {} -func (*Oneof_F_Sint32) isOneof_Union() {} -func (*Oneof_F_Sint64) isOneof_Union() {} -func (*Oneof_F_Enum) isOneof_Union() {} -func (*Oneof_F_Message) isOneof_Union() {} -func (*Oneof_FGroup) isOneof_Union() {} -func (*Oneof_F_Largest_Tag) isOneof_Union() {} -func (*Oneof_Value) isOneof_Tormato() {} - -func (m *Oneof) GetUnion() isOneof_Union { - if m != nil { - return m.Union - } - return nil -} -func (m *Oneof) GetTormato() isOneof_Tormato { - if m != nil { - return m.Tormato - } - return nil -} - -func (m *Oneof) GetF_Bool() bool { - if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { - return x.F_Bool - } - return false -} - -func (m *Oneof) GetF_Int32() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { - return x.F_Int32 - } - return 0 -} - -func (m *Oneof) GetF_Int64() int64 { - if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { - return x.F_Int64 - } - return 0 -} - -func (m *Oneof) GetF_Fixed32() uint32 { - if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { - return x.F_Fixed32 - } - return 0 -} - -func (m *Oneof) GetF_Fixed64() uint64 { - if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { - return x.F_Fixed64 - } - return 0 -} - -func (m *Oneof) GetF_Uint32() uint32 { - if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { - return x.F_Uint32 - } - return 0 -} - -func (m *Oneof) GetF_Uint64() uint64 { - if x, ok := 
m.GetUnion().(*Oneof_F_Uint64); ok { - return x.F_Uint64 - } - return 0 -} - -func (m *Oneof) GetF_Float() float32 { - if x, ok := m.GetUnion().(*Oneof_F_Float); ok { - return x.F_Float - } - return 0 -} - -func (m *Oneof) GetF_Double() float64 { - if x, ok := m.GetUnion().(*Oneof_F_Double); ok { - return x.F_Double - } - return 0 -} - -func (m *Oneof) GetF_String() string { - if x, ok := m.GetUnion().(*Oneof_F_String); ok { - return x.F_String - } - return "" -} - -func (m *Oneof) GetF_Bytes() []byte { - if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { - return x.F_Bytes - } - return nil -} - -func (m *Oneof) GetF_Sint32() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { - return x.F_Sint32 - } - return 0 -} - -func (m *Oneof) GetF_Sint64() int64 { - if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { - return x.F_Sint64 - } - return 0 -} - -func (m *Oneof) GetF_Enum() MyMessage_Color { - if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { - return x.F_Enum - } - return MyMessage_RED -} - -func (m *Oneof) GetF_Message() *GoTestField { - if x, ok := m.GetUnion().(*Oneof_F_Message); ok { - return x.F_Message - } - return nil -} - -func (m *Oneof) GetFGroup() *Oneof_F_Group { - if x, ok := m.GetUnion().(*Oneof_FGroup); ok { - return x.FGroup - } - return nil -} - -func (m *Oneof) GetF_Largest_Tag() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { - return x.F_Largest_Tag - } - return 0 -} - -func (m *Oneof) GetValue() int32 { - if x, ok := m.GetTormato().(*Oneof_Value); ok { - return x.Value - } - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ - (*Oneof_F_Bool)(nil), - (*Oneof_F_Int32)(nil), - (*Oneof_F_Int64)(nil), - (*Oneof_F_Fixed32)(nil), - (*Oneof_F_Fixed64)(nil), - (*Oneof_F_Uint32)(nil), - (*Oneof_F_Uint64)(nil), - (*Oneof_F_Float)(nil), - (*Oneof_F_Double)(nil), - (*Oneof_F_String)(nil), - (*Oneof_F_Bytes)(nil), - (*Oneof_F_Sint32)(nil), - (*Oneof_F_Sint64)(nil), - (*Oneof_F_Enum)(nil), - (*Oneof_F_Message)(nil), - (*Oneof_FGroup)(nil), - (*Oneof_F_Largest_Tag)(nil), - (*Oneof_Value)(nil), - } -} - -func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Oneof) - // union - switch x := m.Union.(type) { - case *Oneof_F_Bool: - t := uint64(0) - if x.F_Bool { - t = 1 - } - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Oneof_F_Int32: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Int32)) - case *Oneof_F_Int64: - b.EncodeVarint(3<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Int64)) - case *Oneof_F_Fixed32: - b.EncodeVarint(4<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(x.F_Fixed32)) - case *Oneof_F_Fixed64: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(uint64(x.F_Fixed64)) - case *Oneof_F_Uint32: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Uint32)) - case *Oneof_F_Uint64: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Uint64)) - case *Oneof_F_Float: - b.EncodeVarint(8<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) - case *Oneof_F_Double: - b.EncodeVarint(9<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.F_Double)) - case *Oneof_F_String: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.F_String) - case 
*Oneof_F_Bytes: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.F_Bytes) - case *Oneof_F_Sint32: - b.EncodeVarint(12<<3 | proto.WireVarint) - b.EncodeZigzag32(uint64(x.F_Sint32)) - case *Oneof_F_Sint64: - b.EncodeVarint(13<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.F_Sint64)) - case *Oneof_F_Enum: - b.EncodeVarint(14<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Enum)) - case *Oneof_F_Message: - b.EncodeVarint(15<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.F_Message); err != nil { - return err - } - case *Oneof_FGroup: - b.EncodeVarint(16<<3 | proto.WireStartGroup) - if err := b.Marshal(x.FGroup); err != nil { - return err - } - b.EncodeVarint(16<<3 | proto.WireEndGroup) - case *Oneof_F_Largest_Tag: - b.EncodeVarint(536870911<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Largest_Tag)) - case nil: - default: - return fmt.Errorf("Oneof.Union has unexpected type %T", x) - } - // tormato - switch x := m.Tormato.(type) { - case *Oneof_Value: - b.EncodeVarint(100<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Value)) - case nil: - default: - return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) - } - return nil -} - -func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Oneof) - switch tag { - case 1: // union.F_Bool - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Bool{x != 0} - return true, err - case 2: // union.F_Int32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Int32{int32(x)} - return true, err - case 3: // union.F_Int64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Int64{int64(x)} - return true, err - case 4: // union.F_Fixed32 - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, 
err := b.DecodeFixed32() - m.Union = &Oneof_F_Fixed32{uint32(x)} - return true, err - case 5: // union.F_Fixed64 - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Oneof_F_Fixed64{x} - return true, err - case 6: // union.F_Uint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Uint32{uint32(x)} - return true, err - case 7: // union.F_Uint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Uint64{x} - return true, err - case 8: // union.F_Float - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} - return true, err - case 9: // union.F_Double - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Oneof_F_Double{math.Float64frombits(x)} - return true, err - case 10: // union.F_String - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Oneof_F_String{x} - return true, err - case 11: // union.F_Bytes - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Oneof_F_Bytes{x} - return true, err - case 12: // union.F_Sint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag32() - m.Union = &Oneof_F_Sint32{int32(x)} - return true, err - case 13: // union.F_Sint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Union = &Oneof_F_Sint64{int64(x)} - return true, err - case 14: // union.F_Enum - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - 
m.Union = &Oneof_F_Enum{MyMessage_Color(x)} - return true, err - case 15: // union.F_Message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(GoTestField) - err := b.DecodeMessage(msg) - m.Union = &Oneof_F_Message{msg} - return true, err - case 16: // union.f_group - if wire != proto.WireStartGroup { - return true, proto.ErrInternalBadWireType - } - msg := new(Oneof_F_Group) - err := b.DecodeGroup(msg) - m.Union = &Oneof_FGroup{msg} - return true, err - case 536870911: // union.F_Largest_Tag - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Largest_Tag{int32(x)} - return true, err - case 100: // tormato.value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Tormato = &Oneof_Value{int32(x)} - return true, err - default: - return false, nil - } -} - -func _Oneof_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Oneof) - // union - switch x := m.Union.(type) { - case *Oneof_F_Bool: - n += proto.SizeVarint(1<<3 | proto.WireVarint) - n += 1 - case *Oneof_F_Int32: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Int32)) - case *Oneof_F_Int64: - n += proto.SizeVarint(3<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Int64)) - case *Oneof_F_Fixed32: - n += proto.SizeVarint(4<<3 | proto.WireFixed32) - n += 4 - case *Oneof_F_Fixed64: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case *Oneof_F_Uint32: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Uint32)) - case *Oneof_F_Uint64: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Uint64)) - case *Oneof_F_Float: - n += proto.SizeVarint(8<<3 | proto.WireFixed32) - n += 4 - case *Oneof_F_Double: - n += proto.SizeVarint(9<<3 | proto.WireFixed64) - n += 8 - case *Oneof_F_String: - n += proto.SizeVarint(10<<3 | 
proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.F_String))) - n += len(x.F_String) - case *Oneof_F_Bytes: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.F_Bytes))) - n += len(x.F_Bytes) - case *Oneof_F_Sint32: - n += proto.SizeVarint(12<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) - case *Oneof_F_Sint64: - n += proto.SizeVarint(13<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) - case *Oneof_F_Enum: - n += proto.SizeVarint(14<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Enum)) - case *Oneof_F_Message: - s := proto.Size(x.F_Message) - n += proto.SizeVarint(15<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Oneof_FGroup: - n += proto.SizeVarint(16<<3 | proto.WireStartGroup) - n += proto.Size(x.FGroup) - n += proto.SizeVarint(16<<3 | proto.WireEndGroup) - case *Oneof_F_Largest_Tag: - n += proto.SizeVarint(536870911<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Largest_Tag)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - // tormato - switch x := m.Tormato.(type) { - case *Oneof_Value: - n += proto.SizeVarint(100<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Value)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Oneof_F_Group struct { - X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } -func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } -func (*Oneof_F_Group) ProtoMessage() {} -func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28, 0} } - -func (m *Oneof_F_Group) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type 
Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` - // This is a oneof, called "union". - // - // Types that are valid to be assigned to Union: - // *Communique_Number - // *Communique_Name - // *Communique_Data - // *Communique_TempC - // *Communique_Col - // *Communique_Msg - Union isCommunique_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Communique) Reset() { *m = Communique{} } -func (m *Communique) String() string { return proto.CompactTextString(m) } -func (*Communique) ProtoMessage() {} -func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -type isCommunique_Union interface { - isCommunique_Union() -} - -type Communique_Number struct { - Number int32 `protobuf:"varint,5,opt,name=number,oneof"` -} -type Communique_Name struct { - Name string `protobuf:"bytes,6,opt,name=name,oneof"` -} -type Communique_Data struct { - Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` -} -type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` -} -type Communique_Col struct { - Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` -} -type Communique_Msg struct { - Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` -} - -func (*Communique_Number) isCommunique_Union() {} -func (*Communique_Name) isCommunique_Union() {} -func (*Communique_Data) isCommunique_Union() {} -func (*Communique_TempC) isCommunique_Union() {} -func (*Communique_Col) isCommunique_Union() {} -func (*Communique_Msg) isCommunique_Union() {} - -func (m *Communique) GetUnion() isCommunique_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *Communique) GetMakeMeCry() bool { - if m != nil && m.MakeMeCry != nil { - return *m.MakeMeCry - } - return false -} - -func (m *Communique) GetNumber() int32 { - if x, ok := m.GetUnion().(*Communique_Number); ok { - 
return x.Number - } - return 0 -} - -func (m *Communique) GetName() string { - if x, ok := m.GetUnion().(*Communique_Name); ok { - return x.Name - } - return "" -} - -func (m *Communique) GetData() []byte { - if x, ok := m.GetUnion().(*Communique_Data); ok { - return x.Data - } - return nil -} - -func (m *Communique) GetTempC() float64 { - if x, ok := m.GetUnion().(*Communique_TempC); ok { - return x.TempC - } - return 0 -} - -func (m *Communique) GetCol() MyMessage_Color { - if x, ok := m.GetUnion().(*Communique_Col); ok { - return x.Col - } - return MyMessage_RED -} - -func (m *Communique) GetMsg() *Strings { - if x, ok := m.GetUnion().(*Communique_Msg); ok { - return x.Msg - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ - (*Communique_Number)(nil), - (*Communique_Name)(nil), - (*Communique_Data)(nil), - (*Communique_TempC)(nil), - (*Communique_Col)(nil), - (*Communique_Msg)(nil), - } -} - -func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - b.EncodeVarint(5<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Number)) - case *Communique_Name: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Name) - case *Communique_Data: - b.EncodeVarint(7<<3 | proto.WireBytes) - b.EncodeRawBytes(x.Data) - case *Communique_TempC: - b.EncodeVarint(8<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.TempC)) - case *Communique_Col: - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Col)) - case *Communique_Msg: - b.EncodeVarint(10<<3 | proto.WireBytes) - if err := 
b.EncodeMessage(x.Msg); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Communique.Union has unexpected type %T", x) - } - return nil -} - -func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Communique) - switch tag { - case 5: // union.number - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Number{int32(x)} - return true, err - case 6: // union.name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Communique_Name{x} - return true, err - case 7: // union.data - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Communique_Data{x} - return true, err - case 8: // union.temp_c - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Communique_TempC{math.Float64frombits(x)} - return true, err - case 9: // union.col - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Col{MyMessage_Color(x)} - return true, err - case 10: // union.msg - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Strings) - err := b.DecodeMessage(msg) - m.Union = &Communique_Msg{msg} - return true, err - default: - return false, nil - } -} - -func _Communique_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - n += proto.SizeVarint(5<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Number)) - case *Communique_Name: - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Name))) - n += len(x.Name) - case *Communique_Data: - n += proto.SizeVarint(7<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(len(x.Data))) - n += len(x.Data) - case *Communique_TempC: - n += proto.SizeVarint(8<<3 | proto.WireFixed64) - n += 8 - case *Communique_Col: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Col)) - case *Communique_Msg: - s := proto.Size(x.Msg) - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -var E_Complex = &proto.ExtensionDesc{ - ExtendedType: (*OtherMessage)(nil), - ExtensionType: (*ComplexExtension)(nil), - Field: 200, - Name: "testdata.complex", - Tag: "bytes,200,opt,name=complex", -} - -var E_RComplex = &proto.ExtensionDesc{ - ExtendedType: (*OtherMessage)(nil), - ExtensionType: ([]*ComplexExtension)(nil), - Field: 201, - Name: "testdata.r_complex", - Tag: "bytes,201,rep,name=r_complex,json=rComplex", -} - -var E_NoDefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", -} - -var E_NoDefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 102, - Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", -} - -var E_NoDefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 103, - Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", -} - -var E_NoDefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 104, - 
Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", -} - -var E_NoDefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 105, - Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", -} - -var E_NoDefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 106, - Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", -} - -var E_NoDefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 107, - Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", -} - -var E_NoDefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 108, - Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", -} - -var E_NoDefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 109, - Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", -} - -var E_NoDefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 110, - Name: "testdata.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", -} - -var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 111, - Name: "testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", -} - -var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 112, - Name: 
"testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", -} - -var E_NoDefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 113, - Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", -} - -var E_NoDefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 114, - Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", -} - -var E_NoDefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 115, - Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", -} - -var E_NoDefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 116, - Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", -} - -var E_DefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 201, - Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", -} - -var E_DefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 202, - Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", -} - -var E_DefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 203, - Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", -} - -var E_DefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 
204, - Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", -} - -var E_DefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 205, - Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", -} - -var E_DefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 206, - Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", -} - -var E_DefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 207, - Name: "testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", -} - -var E_DefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 208, - Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", -} - -var E_DefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 209, - Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", -} - -var E_DefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 210, - Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", -} - -var E_DefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 211, - Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", -} - -var E_DefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 212, - Name: 
"testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", -} - -var E_DefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 213, - Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", -} - -var E_DefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 214, - Name: "testdata.default_string", - Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", -} - -var E_DefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 215, - Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", -} - -var E_DefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 216, - Name: "testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 
205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - Tag: "bytes,214,opt,name=x214", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: 
(*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", -} - -var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: 
(*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", -} - -var E_X238 = 
&proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: 
"bytes,248,opt,name=x248", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", -} - -func init() { - proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") - proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") - proto.RegisterType((*GoTest)(nil), "testdata.GoTest") - proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") - proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") - proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") - proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") - proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") - proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") - proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") - proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") - proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") - proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested") - proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage") - proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") - proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") - proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") - proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") - proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") - proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") - proto.RegisterType((*Ext)(nil), "testdata.Ext") - proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension") - 
proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage") - proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet") - proto.RegisterType((*Empty)(nil), "testdata.Empty") - proto.RegisterType((*MessageList)(nil), "testdata.MessageList") - proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message") - proto.RegisterType((*Strings)(nil), "testdata.Strings") - proto.RegisterType((*Defaults)(nil), "testdata.Defaults") - proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults") - proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum") - proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated") - proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld") - proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G") - proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew") - proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G") - proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint") - proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap") - proto.RegisterType((*Oneof)(nil), "testdata.Oneof") - proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group") - proto.RegisterType((*Communique)(nil), "testdata.Communique") - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_Complex) - 
proto.RegisterExtension(E_RComplex) - proto.RegisterExtension(E_NoDefaultDouble) - proto.RegisterExtension(E_NoDefaultFloat) - proto.RegisterExtension(E_NoDefaultInt32) - proto.RegisterExtension(E_NoDefaultInt64) - proto.RegisterExtension(E_NoDefaultUint32) - proto.RegisterExtension(E_NoDefaultUint64) - proto.RegisterExtension(E_NoDefaultSint32) - proto.RegisterExtension(E_NoDefaultSint64) - proto.RegisterExtension(E_NoDefaultFixed32) - proto.RegisterExtension(E_NoDefaultFixed64) - proto.RegisterExtension(E_NoDefaultSfixed32) - proto.RegisterExtension(E_NoDefaultSfixed64) - proto.RegisterExtension(E_NoDefaultBool) - proto.RegisterExtension(E_NoDefaultString) - proto.RegisterExtension(E_NoDefaultBytes) - proto.RegisterExtension(E_NoDefaultEnum) - proto.RegisterExtension(E_DefaultDouble) - proto.RegisterExtension(E_DefaultFloat) - proto.RegisterExtension(E_DefaultInt32) - proto.RegisterExtension(E_DefaultInt64) - proto.RegisterExtension(E_DefaultUint32) - proto.RegisterExtension(E_DefaultUint64) - proto.RegisterExtension(E_DefaultSint32) - proto.RegisterExtension(E_DefaultSint64) - proto.RegisterExtension(E_DefaultFixed32) - proto.RegisterExtension(E_DefaultFixed64) - proto.RegisterExtension(E_DefaultSfixed32) - proto.RegisterExtension(E_DefaultSfixed64) - proto.RegisterExtension(E_DefaultBool) - proto.RegisterExtension(E_DefaultString) - proto.RegisterExtension(E_DefaultBytes) - proto.RegisterExtension(E_DefaultEnum) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - 
proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -} - -var fileDescriptor0 = []byte{ - // 4407 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0x59, 0x77, 0xdb, 0x48, - 0x76, 0x36, 0xc0, 0xfd, 0x92, 0x12, 0xa1, 0xb2, 0xda, 0x4d, 0x4b, 0x5e, 0x60, 0xce, 0x74, 0x37, - 0xbd, 0x69, 0x24, 0x10, 0xa2, 0x6d, 0xba, 0xd3, 0xe7, 0x78, 0xa1, 0x64, 0x9d, 0xb1, 0x44, 0x05, - 0x52, 0x77, 0x9f, 0xe9, 0x3c, 0xf0, 0x50, 0x22, 0x48, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, - 0xf2, 0xd2, 0x2f, 0xc9, 0x6b, 0xb6, 0x97, 0xbc, 0xe6, 0x29, 0x4f, 0x49, 0xce, 0xc9, 0x9f, 0x48, - 0xba, 0x7b, 0xd6, 0x9e, 0x35, 0xeb, 0x64, 0x5f, 0x26, 0xfb, 0x36, 0x93, 0xe4, 0xa5, 0xe7, 0xd4, - 0xad, 0x02, 0x50, 0x00, 0x09, 0x48, 0x7e, 0x12, 0x51, 0xf5, 0x7d, 0xb7, 0x6e, 0x15, 0xbe, 0xba, - 0xb7, 0x6e, 0x41, 0x00, 0x8e, 0x39, 0x71, 0x56, 0x46, 0x63, 0xdb, 0xb1, 
0x49, 0x96, 0xfe, 0xee, - 0xb4, 0x9d, 0x76, 0xf9, 0x3a, 0xa4, 0x37, 0xed, 0x86, 0x75, 0x34, 0x24, 0x57, 0x21, 0xd1, 0xb5, - 0xed, 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x6c, 0x34, 0x9b, 0x06, 0xed, - 0x29, 0xdf, 0x81, 0xfc, 0xa6, 0xbd, 0x6f, 0x4e, 0x9c, 0x8d, 0xbe, 0x39, 0xe8, 0x90, 0x45, 0x48, - 0x3d, 0x6d, 0x1f, 0x98, 0x03, 0x64, 0xe4, 0x8c, 0xd4, 0x80, 0x3e, 0x10, 0x02, 0xc9, 0xfd, 0x93, - 0x91, 0x59, 0x92, 0xb1, 0x31, 0xe9, 0x9c, 0x8c, 0xcc, 0xf2, 0xaf, 0x5c, 0xa1, 0x83, 0x50, 0x26, - 0xb9, 0x0e, 0xc9, 0x2f, 0xf7, 0xad, 0x0e, 0x1f, 0xe5, 0x35, 0x7f, 0x14, 0xd6, 0xbf, 0xf2, 0xe5, - 0xad, 0x9d, 0xc7, 0x46, 0xf2, 0x79, 0xdf, 0x42, 0xfb, 0xfb, 0xed, 0x83, 0x01, 0x35, 0x25, 0x51, - 0xfb, 0x0e, 0x7d, 0xa0, 0xad, 0xbb, 0xed, 0x71, 0x7b, 0x58, 0x4a, 0xa8, 0x52, 0x25, 0x65, 0xa4, - 0x46, 0xf4, 0x81, 0xdc, 0x87, 0x39, 0xc3, 0x7c, 0x71, 0xd4, 0x1f, 0x9b, 0x1d, 0x74, 0xae, 0x94, - 0x54, 0xe5, 0x4a, 0x7e, 0xda, 0x3e, 0x76, 0x1a, 0x73, 0x63, 0x11, 0xcb, 0xc8, 0x23, 0xb3, 0xed, - 0xb8, 0xe4, 0x94, 0x9a, 0x88, 0x25, 0x0b, 0x58, 0x4a, 0x6e, 0x8e, 0x9c, 0xbe, 0x6d, 0xb5, 0x07, - 0x8c, 0x9c, 0x56, 0xa5, 0x18, 0xb2, 0x2d, 0x62, 0xc9, 0x9b, 0x50, 0xdc, 0x68, 0x3d, 0xb4, 0xed, - 0x41, 0xcb, 0xf5, 0xa8, 0x04, 0xaa, 0x5c, 0xc9, 0x1a, 0x73, 0x5d, 0xda, 0xea, 0x4e, 0x89, 0x54, - 0x40, 0xd9, 0x68, 0x6d, 0x59, 0x4e, 0x55, 0xf3, 0x81, 0x79, 0x55, 0xae, 0xa4, 0x8c, 0xf9, 0x2e, - 0x36, 0x4f, 0x21, 0x6b, 0xba, 0x8f, 0x2c, 0xa8, 0x72, 0x25, 0xc1, 0x90, 0x35, 0xdd, 0x43, 0xde, - 0x02, 0xb2, 0xd1, 0xda, 0xe8, 0x1f, 0x9b, 0x1d, 0xd1, 0xea, 0x9c, 0x2a, 0x57, 0x32, 0x86, 0xd2, - 0xe5, 0x1d, 0x33, 0xd0, 0xa2, 0xe5, 0x79, 0x55, 0xae, 0xa4, 0x5d, 0xb4, 0x60, 0xfb, 0x06, 0x2c, - 0x6c, 0xb4, 0xde, 0xed, 0x07, 0x1d, 0x2e, 0xaa, 0x72, 0x65, 0xce, 0x28, 0x76, 0x59, 0xfb, 0x34, - 0x56, 0x34, 0xac, 0xa8, 0x72, 0x25, 0xc9, 0xb1, 0x82, 0x5d, 0x9c, 0xdd, 0xc6, 0xc0, 0x6e, 0x3b, - 0x3e, 0x74, 0x41, 0x95, 0x2b, 0xb2, 0x31, 0xdf, 0xc5, 0xe6, 0xa0, 0xd5, 0xc7, 0xf6, 0xd1, 0xc1, - 0xc0, 0xf4, 
0xa1, 0x44, 0x95, 0x2b, 0x92, 0x51, 0xec, 0xb2, 0xf6, 0x20, 0x76, 0xcf, 0x19, 0xf7, - 0xad, 0x9e, 0x8f, 0x3d, 0x8f, 0xfa, 0x2d, 0x76, 0x59, 0x7b, 0xd0, 0x83, 0x87, 0x27, 0x8e, 0x39, - 0xf1, 0xa1, 0xa6, 0x2a, 0x57, 0x0a, 0xc6, 0x7c, 0x17, 0x9b, 0x43, 0x56, 0x43, 0x6b, 0xd0, 0x55, - 0xe5, 0xca, 0x02, 0xb5, 0x3a, 0x63, 0x0d, 0xf6, 0x42, 0x6b, 0xd0, 0x53, 0xe5, 0x0a, 0xe1, 0x58, - 0x61, 0x0d, 0x44, 0xcd, 0x30, 0x21, 0x96, 0x16, 0xd5, 0x84, 0xa0, 0x19, 0xd6, 0x18, 0xd4, 0x0c, - 0x07, 0xbe, 0xa6, 0x26, 0x44, 0xcd, 0x84, 0x90, 0x38, 0x38, 0x47, 0x5e, 0x50, 0x13, 0xa2, 0x66, - 0x38, 0x32, 0xa4, 0x19, 0x8e, 0x7d, 0x5d, 0x4d, 0x04, 0x35, 0x33, 0x85, 0x16, 0x2d, 0x97, 0xd4, - 0x44, 0x50, 0x33, 0x1c, 0x1d, 0xd4, 0x0c, 0x07, 0x5f, 0x54, 0x13, 0x01, 0xcd, 0x84, 0xb1, 0xa2, - 0xe1, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x38, 0x3b, 0x57, 0x33, 0x1c, 0xba, 0xac, 0x26, 0x44, 0xcd, - 0x88, 0x56, 0x3d, 0xcd, 0x70, 0xe8, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x88, 0xf5, 0x34, 0xc3, 0xb1, - 0x97, 0xd5, 0x44, 0x40, 0x33, 0x1c, 0x7b, 0x5d, 0xd4, 0x0c, 0x87, 0x7e, 0x2c, 0xa9, 0x09, 0x51, - 0x34, 0x1c, 0x7a, 0x33, 0x20, 0x1a, 0x8e, 0xfd, 0x84, 0x62, 0x45, 0xd5, 0x84, 0xc1, 0xe2, 0x2a, - 0x7c, 0x4a, 0xc1, 0xa2, 0x6c, 0x38, 0xd8, 0x97, 0x8d, 0x1b, 0x82, 0x4a, 0x57, 0x54, 0xc9, 0x93, - 0x8d, 0x1b, 0xc3, 0x44, 0xd9, 0x78, 0xc0, 0xab, 0x18, 0x6a, 0xb9, 0x6c, 0xa6, 0x90, 0x35, 0xdd, - 0x47, 0xaa, 0xaa, 0xe4, 0xcb, 0xc6, 0x43, 0x06, 0x64, 0xe3, 0x61, 0xaf, 0xa9, 0x92, 0x28, 0x9b, - 0x19, 0x68, 0xd1, 0x72, 0x59, 0x95, 0x44, 0xd9, 0x78, 0x68, 0x51, 0x36, 0x1e, 0xf8, 0x0b, 0xaa, - 0x24, 0xc8, 0x66, 0x1a, 0x2b, 0x1a, 0xfe, 0xa2, 0x2a, 0x09, 0xb2, 0x09, 0xce, 0x8e, 0xc9, 0xc6, - 0x83, 0xbe, 0xa1, 0x4a, 0xbe, 0x6c, 0x82, 0x56, 0xb9, 0x6c, 0x3c, 0xe8, 0x9b, 0xaa, 0x24, 0xc8, - 0x26, 0x88, 0xe5, 0xb2, 0xf1, 0xb0, 0x6f, 0x61, 0x7e, 0x73, 0x65, 0xe3, 0x61, 0x05, 0xd9, 0x78, - 0xd0, 0xdf, 0xa1, 0xb9, 0xd0, 0x93, 0x8d, 0x07, 0x15, 0x65, 0xe3, 0x61, 0x7f, 0x97, 0x62, 0x7d, - 0xd9, 0x4c, 0x83, 0xc5, 0x55, 0xf8, 0x3d, 0x0a, 
0xf6, 0x65, 0xe3, 0x81, 0x57, 0xd0, 0x09, 0x2a, - 0x9b, 0x8e, 0xd9, 0x6d, 0x1f, 0x0d, 0xa8, 0xc4, 0x2a, 0x54, 0x37, 0xf5, 0xa4, 0x33, 0x3e, 0x32, - 0xa9, 0x27, 0xb6, 0x3d, 0x78, 0xec, 0xf6, 0x91, 0x15, 0x6a, 0x9c, 0xc9, 0xc7, 0x27, 0x5c, 0xa7, - 0xfa, 0xa9, 0xcb, 0x55, 0xcd, 0x28, 0x32, 0x0d, 0x4d, 0xe3, 0x6b, 0xba, 0x80, 0xbf, 0x41, 0x55, - 0x54, 0x97, 0x6b, 0x3a, 0xc3, 0xd7, 0x74, 0x1f, 0x5f, 0x85, 0xf3, 0xbe, 0x94, 0x7c, 0xc6, 0x4d, - 0xaa, 0xa5, 0x7a, 0xa2, 0xaa, 0xad, 0x1a, 0x0b, 0xae, 0xa0, 0x66, 0x91, 0x02, 0xc3, 0xdc, 0xa2, - 0x92, 0xaa, 0x27, 0x6a, 0xba, 0x47, 0x12, 0x47, 0xd2, 0xa8, 0x0c, 0xb9, 0xb0, 0x7c, 0xce, 0x6d, - 0xaa, 0xac, 0x7a, 0xb2, 0xaa, 0xad, 0xae, 0x1a, 0x0a, 0xd7, 0xd7, 0x0c, 0x4e, 0x60, 0x9c, 0x15, - 0xaa, 0xb0, 0x7a, 0xb2, 0xa6, 0x7b, 0x9c, 0xe0, 0x38, 0x0b, 0xae, 0xd0, 0x7c, 0xca, 0x97, 0xa8, - 0xd2, 0xea, 0xe9, 0xea, 0x9a, 0xbe, 0xb6, 0x7e, 0xcf, 0x28, 0x32, 0xc5, 0xf9, 0x1c, 0x9d, 0x8e, - 0xc3, 0x25, 0xe7, 0x93, 0x56, 0xa9, 0xe6, 0xea, 0x69, 0xed, 0xce, 0xda, 0x5d, 0xed, 0xae, 0xa1, - 0x70, 0xed, 0xf9, 0xac, 0x77, 0x28, 0x8b, 0x8b, 0xcf, 0x67, 0xad, 0x51, 0xf5, 0xd5, 0x95, 0x67, - 0xe6, 0x60, 0x60, 0xdf, 0x52, 0xcb, 0x2f, 0xed, 0xf1, 0xa0, 0x73, 0xad, 0x0c, 0x86, 0xc2, 0xf5, - 0x28, 0x8e, 0xba, 0xe0, 0x0a, 0xd2, 0xa7, 0xff, 0x1a, 0x3d, 0x87, 0x15, 0xea, 0x99, 0x87, 0xfd, - 0x9e, 0x65, 0x4f, 0x4c, 0xa3, 0xc8, 0xa4, 0x19, 0x5a, 0x93, 0xbd, 0xf0, 0x3a, 0xfe, 0x3a, 0xa5, - 0x2d, 0xd4, 0x13, 0xb7, 0xab, 0x1a, 0x1d, 0x69, 0xd6, 0x3a, 0xee, 0x85, 0xd7, 0xf1, 0x37, 0x28, - 0x87, 0xd4, 0x13, 0xb7, 0x6b, 0x3a, 0xe7, 0x88, 0xeb, 0x78, 0x07, 0x2e, 0x84, 0xf2, 0x62, 0x6b, - 0xd4, 0x3e, 0x7c, 0x6e, 0x76, 0x4a, 0x1a, 0x4d, 0x8f, 0x0f, 0x65, 0x45, 0x32, 0xce, 0x07, 0x52, - 0xe4, 0x2e, 0x76, 0x93, 0x7b, 0xf0, 0x7a, 0x38, 0x51, 0xba, 0xcc, 0x2a, 0xcd, 0x97, 0xc8, 0x5c, - 0x0c, 0xe6, 0xcc, 0x10, 0x55, 0x08, 0xc0, 0x2e, 0x55, 0xa7, 0x09, 0xd4, 0xa7, 0xfa, 0x91, 0x98, - 0x53, 0x7f, 0x06, 0x2e, 0x4e, 0xa7, 0x52, 0x97, 0xbc, 0x4e, 0x33, 0x2a, 0x92, 0x2f, 
0x84, 0xb3, - 0xea, 0x14, 0x7d, 0xc6, 0xd8, 0x35, 0x9a, 0x62, 0x45, 0xfa, 0xd4, 0xe8, 0xf7, 0xa1, 0x34, 0x95, - 0x6c, 0x5d, 0xf6, 0x1d, 0x9a, 0x73, 0x91, 0xfd, 0x5a, 0x28, 0xef, 0x86, 0xc9, 0x33, 0x86, 0xbe, - 0x4b, 0x93, 0xb0, 0x40, 0x9e, 0x1a, 0x19, 0x97, 0x2c, 0x98, 0x8e, 0x5d, 0xee, 0x3d, 0x9a, 0x95, - 0xf9, 0x92, 0x05, 0x32, 0xb3, 0x38, 0x6e, 0x28, 0x3f, 0xbb, 0xdc, 0x3a, 0x4d, 0xd3, 0x7c, 0xdc, - 0x60, 0xaa, 0xe6, 0xe4, 0xb7, 0x29, 0x79, 0x6f, 0xf6, 0x8c, 0x7f, 0x9c, 0xa0, 0x09, 0x96, 0xb3, - 0xf7, 0x66, 0x4d, 0xd9, 0x63, 0xcf, 0x98, 0xf2, 0x4f, 0x28, 0x9b, 0x08, 0xec, 0xa9, 0x39, 0x3f, - 0x06, 0xaf, 0xe2, 0xe8, 0x8d, 0xed, 0xa3, 0x51, 0x69, 0x43, 0x95, 0x2b, 0xa0, 0x5d, 0x99, 0xaa, - 0x7e, 0xdc, 0x43, 0xde, 0x26, 0x45, 0x19, 0x41, 0x12, 0xb3, 0xc2, 0xec, 0x32, 0x2b, 0xbb, 0x6a, - 0x22, 0xc2, 0x0a, 0x43, 0x79, 0x56, 0x04, 0x12, 0xb5, 0xe2, 0x06, 0x7d, 0x66, 0xe5, 0x03, 0x55, - 0x9a, 0x69, 0xc5, 0x4d, 0x01, 0xdc, 0x4a, 0x80, 0xb4, 0xb4, 0xee, 0xd7, 0x5b, 0xd8, 0x4f, 0xbe, - 0x18, 0x2e, 0xc0, 0x36, 0xf1, 0xfc, 0x1c, 0xac, 0xb4, 0x18, 0x4d, 0x70, 0x6e, 0x9a, 0xf6, 0xb3, - 0x11, 0xb4, 0x80, 0x37, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, 0xa6, 0x04, 0x49, 0x5a, 0x4f, - 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6e, 0x3d, 0x56, 0xce, 0xd1, 0x5f, 0x0f, 0x9b, 0xcd, 0xa7, 0x8a, - 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xfd, 0xc6, 0x9e, 0x22, 0x93, 0x22, 0xe4, 0x37, 0xb6, 0x76, - 0x36, 0x1b, 0xc6, 0xae, 0xb1, 0xb5, 0xb3, 0xaf, 0x24, 0x68, 0xdf, 0xc6, 0xd3, 0xe6, 0x83, 0x7d, - 0x25, 0x49, 0x32, 0x90, 0xa0, 0x6d, 0x29, 0x02, 0x90, 0xde, 0xdb, 0x37, 0xb6, 0x76, 0x36, 0x95, - 0x34, 0xb5, 0xb2, 0xbf, 0xb5, 0xdd, 0x50, 0x32, 0x14, 0xb9, 0xff, 0xee, 0xee, 0xd3, 0x86, 0x92, - 0xa5, 0x3f, 0x1f, 0x18, 0xc6, 0x83, 0xaf, 0x28, 0x39, 0x4a, 0xda, 0x7e, 0xb0, 0xab, 0x00, 0x76, - 0x3f, 0x78, 0xf8, 0xb4, 0xa1, 0xe4, 0x49, 0x01, 0xb2, 0x1b, 0xef, 0xee, 0x3c, 0xda, 0xdf, 0x6a, - 0xee, 0x28, 0x85, 0xf2, 0x6f, 0xc9, 0x00, 0x9b, 0xf6, 0xde, 0xf3, 0xfe, 0x08, 0xab, 0xe2, 0xcb, - 0x00, 0x93, 0xe7, 0xfd, 
0x51, 0x0b, 0xa5, 0xc7, 0x2b, 0xbb, 0x1c, 0x6d, 0xc1, 0xa0, 0x43, 0xae, - 0x41, 0x01, 0xbb, 0xbb, 0x2c, 0x14, 0x60, 0x41, 0x97, 0x31, 0xf2, 0xb4, 0x8d, 0x47, 0x87, 0x20, - 0xa4, 0xa6, 0x63, 0x1d, 0x97, 0x16, 0x20, 0x35, 0x9d, 0x5c, 0x05, 0x7c, 0x6c, 0x4d, 0x30, 0xac, - 0x63, 0xed, 0x96, 0x33, 0x70, 0x5c, 0x16, 0xe8, 0xc9, 0xdb, 0x80, 0x63, 0x32, 0x59, 0x14, 0xa7, - 0x25, 0xea, 0xba, 0xbb, 0x42, 0x7f, 0x30, 0x59, 0xf8, 0x84, 0xa5, 0x26, 0xe4, 0xbc, 0x76, 0x3a, - 0x16, 0xb6, 0xf2, 0x19, 0x29, 0x38, 0x23, 0xc0, 0x26, 0x6f, 0x4a, 0x0c, 0xc0, 0xbd, 0x59, 0x40, - 0x6f, 0x18, 0x89, 0xb9, 0x53, 0xbe, 0x0c, 0x73, 0x3b, 0xb6, 0xc5, 0xb6, 0x10, 0xae, 0x52, 0x01, - 0xa4, 0x76, 0x49, 0xc2, 0x12, 0x46, 0x6a, 0x97, 0xaf, 0x00, 0x08, 0x7d, 0x0a, 0x48, 0x07, 0xac, - 0x0f, 0x37, 0xa2, 0x74, 0x50, 0xbe, 0x09, 0xe9, 0xed, 0xf6, 0xf1, 0x7e, 0xbb, 0x47, 0xae, 0x01, - 0x0c, 0xda, 0x13, 0xa7, 0xd5, 0x45, 0xa9, 0x7c, 0xfe, 0xf9, 0xe7, 0x9f, 0x4b, 0x78, 0xe2, 0xca, - 0xd1, 0x56, 0x26, 0x95, 0x17, 0x00, 0xcd, 0x41, 0x67, 0xdb, 0x9c, 0x4c, 0xda, 0x3d, 0x93, 0x54, - 0x21, 0x6d, 0x99, 0x13, 0x9a, 0x72, 0x24, 0x2c, 0xe6, 0x97, 0xfd, 0x55, 0xf0, 0x51, 0x2b, 0x3b, - 0x08, 0x31, 0x38, 0x94, 0x28, 0x90, 0xb0, 0x8e, 0x86, 0x78, 0x59, 0x91, 0x32, 0xe8, 0xcf, 0xa5, - 0x4b, 0x90, 0x66, 0x18, 0x42, 0x20, 0x69, 0xb5, 0x87, 0x66, 0x89, 0x8d, 0x8b, 0xbf, 0xcb, 0xbf, - 0x2a, 0x01, 0xec, 0x98, 0x2f, 0xcf, 0x30, 0xa6, 0x8f, 0x8a, 0x19, 0x33, 0xc1, 0xc6, 0xbc, 0x1f, - 0x37, 0x26, 0xd5, 0x59, 0xd7, 0xb6, 0x3b, 0x2d, 0xf6, 0x8a, 0xd9, 0xbd, 0x4a, 0x8e, 0xb6, 0xe0, - 0x5b, 0x2b, 0x7f, 0x00, 0x85, 0x2d, 0xcb, 0x32, 0xc7, 0xae, 0x4f, 0x04, 0x92, 0xcf, 0xec, 0x89, - 0xc3, 0x2f, 0x78, 0xf0, 0x37, 0x29, 0x41, 0x72, 0x64, 0x8f, 0x1d, 0x36, 0xcf, 0x7a, 0x52, 0x5f, - 0x5d, 0x5d, 0x35, 0xb0, 0x85, 0x5c, 0x82, 0xdc, 0xa1, 0x6d, 0x59, 0xe6, 0x21, 0x9d, 0x44, 0x02, - 0x6b, 0x0b, 0xbf, 0xa1, 0xfc, 0xcb, 0x12, 0x14, 0x9a, 0xce, 0x33, 0xdf, 0xb8, 0x02, 0x89, 0xe7, - 0xe6, 0x09, 0xba, 0x97, 0x30, 0xe8, 0x4f, 0xb2, 0x08, 0xa9, 
0x9f, 0x6f, 0x0f, 0x8e, 0xd8, 0x85, - 0x4f, 0xc1, 0x60, 0x0f, 0xe4, 0x02, 0xa4, 0x5f, 0x9a, 0xfd, 0xde, 0x33, 0x07, 0x6d, 0xca, 0x06, - 0x7f, 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0xce, 0x96, 0x92, 0xb8, 0x5e, 0x17, 0xfc, 0xf5, 0x12, 0xe7, - 0x60, 0x30, 0xd0, 0x8d, 0x6c, 0xb6, 0xa3, 0x7c, 0xf4, 0xd1, 0x47, 0x1f, 0xc9, 0xe5, 0x2e, 0x2c, - 0xba, 0xb1, 0x23, 0x30, 0xd9, 0x1d, 0x28, 0x0d, 0x4c, 0xbb, 0xd5, 0xed, 0x5b, 0xed, 0xc1, 0xe0, - 0xa4, 0xf5, 0xd2, 0xb6, 0x5a, 0x6d, 0xab, 0x65, 0x4f, 0x0e, 0xdb, 0x63, 0x5c, 0x80, 0xe8, 0x21, - 0x16, 0x07, 0xa6, 0xbd, 0xc1, 0x68, 0xef, 0xdb, 0xd6, 0x03, 0xab, 0x49, 0x39, 0xe5, 0x3f, 0x48, - 0x42, 0x6e, 0xfb, 0xc4, 0xb5, 0xbe, 0x08, 0xa9, 0x43, 0xfb, 0xc8, 0x62, 0x6b, 0x99, 0x32, 0xd8, - 0x83, 0xf7, 0x8e, 0x64, 0xe1, 0x1d, 0x2d, 0x42, 0xea, 0xc5, 0x91, 0xed, 0x98, 0x38, 0xdd, 0x9c, - 0xc1, 0x1e, 0xe8, 0x6a, 0x8d, 0x4c, 0xa7, 0x94, 0xc4, 0x0a, 0x93, 0xfe, 0xf4, 0xe7, 0x9f, 0x3a, - 0xc3, 0xfc, 0xc9, 0x0a, 0xa4, 0x6d, 0xba, 0xfa, 0x93, 0x52, 0x1a, 0x2f, 0xb7, 0x04, 0xb8, 0xf8, - 0x56, 0x0c, 0x8e, 0x22, 0x5b, 0xb0, 0xf0, 0xd2, 0x6c, 0x0d, 0x8f, 0x26, 0x4e, 0xab, 0x67, 0xb7, - 0x3a, 0xa6, 0x39, 0x32, 0xc7, 0xa5, 0x39, 0x1c, 0x49, 0x88, 0x09, 0xb3, 0x16, 0xd2, 0x98, 0x7f, - 0x69, 0x6e, 0x1f, 0x4d, 0x9c, 0x4d, 0xfb, 0x31, 0xb2, 0x48, 0x15, 0x72, 0x63, 0x93, 0x46, 0x02, - 0xea, 0x6c, 0x21, 0x3c, 0x7a, 0x80, 0x9a, 0x1d, 0x9b, 0x23, 0x6c, 0x20, 0xeb, 0x90, 0x3d, 0xe8, - 0x3f, 0x37, 0x27, 0xcf, 0xcc, 0x4e, 0x29, 0xa3, 0x4a, 0x95, 0x79, 0xed, 0xa2, 0xcf, 0xf1, 0x96, - 0x75, 0xe5, 0x91, 0x3d, 0xb0, 0xc7, 0x86, 0x07, 0x25, 0xf7, 0x21, 0x37, 0xb1, 0x87, 0x26, 0xd3, - 0x77, 0x16, 0x33, 0xdb, 0xe5, 0x59, 0xbc, 0x3d, 0x7b, 0x68, 0xba, 0x11, 0xcc, 0xc5, 0x93, 0x65, - 0xe6, 0xe8, 0x01, 0x3d, 0xbf, 0x96, 0x00, 0xeb, 0x73, 0xea, 0x10, 0x9e, 0x67, 0xc9, 0x12, 0x75, - 0xa8, 0xd7, 0xa5, 0xc7, 0x92, 0x52, 0x1e, 0x8b, 0x3b, 0xef, 0x79, 0xe9, 0x16, 0xe4, 0x3c, 0x83, - 0x7e, 0xe8, 0x63, 0xe1, 0x26, 0x87, 0xf1, 0x80, 0x85, 0x3e, 0x16, 0x6b, 0xde, 0x80, 0x14, 0xba, - 
0x4d, 0xd3, 0x84, 0xd1, 0xa0, 0x59, 0x29, 0x07, 0xa9, 0x4d, 0xa3, 0xd1, 0xd8, 0x51, 0x24, 0x4c, - 0x50, 0x4f, 0xdf, 0x6d, 0x28, 0xb2, 0xa0, 0xd8, 0xdf, 0x96, 0x20, 0xd1, 0x38, 0x46, 0xb5, 0xd0, - 0x69, 0xb8, 0x3b, 0x9a, 0xfe, 0xd6, 0x6a, 0x90, 0x1c, 0xda, 0x63, 0x93, 0x9c, 0x9f, 0x31, 0xcb, - 0x52, 0x0f, 0xdf, 0x97, 0x70, 0x95, 0xdb, 0x38, 0x76, 0x0c, 0xc4, 0x6b, 0x6f, 0x41, 0xd2, 0x31, - 0x8f, 0x9d, 0xd9, 0xbc, 0x67, 0x6c, 0x00, 0x0a, 0xd0, 0x6e, 0x42, 0xda, 0x3a, 0x1a, 0x1e, 0x98, - 0xe3, 0xd9, 0xd0, 0x3e, 0x4e, 0x8f, 0x43, 0xca, 0xef, 0x81, 0xf2, 0xc8, 0x1e, 0x8e, 0x06, 0xe6, - 0x71, 0xe3, 0xd8, 0x31, 0xad, 0x49, 0xdf, 0xb6, 0xa8, 0x9e, 0xbb, 0xfd, 0x31, 0x46, 0x11, 0xbc, - 0xb0, 0xc5, 0x07, 0xba, 0xab, 0x27, 0xe6, 0xa1, 0x6d, 0x75, 0x78, 0xc0, 0xe4, 0x4f, 0x14, 0xed, - 0x3c, 0xeb, 0x8f, 0x69, 0x00, 0xa1, 0x71, 0x9e, 0x3d, 0x94, 0x37, 0xa1, 0xc8, 0x0f, 0xfa, 0x13, - 0x3e, 0x70, 0xf9, 0x06, 0x14, 0xdc, 0x26, 0xbc, 0xbd, 0xce, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x54, - 0xce, 0xd1, 0x65, 0x6d, 0xee, 0x34, 0x14, 0x89, 0xfe, 0xd8, 0x7f, 0xbf, 0x19, 0x58, 0xca, 0x4b, - 0x50, 0xf0, 0x7c, 0xdf, 0x33, 0x1d, 0xec, 0xa1, 0x09, 0x21, 0x53, 0x97, 0xb3, 0x52, 0x39, 0x03, - 0xa9, 0xc6, 0x70, 0xe4, 0x9c, 0x94, 0x7f, 0x11, 0xf2, 0x1c, 0xf4, 0xb4, 0x3f, 0x71, 0xc8, 0x1d, - 0xc8, 0x0c, 0xf9, 0x7c, 0x25, 0x3c, 0x73, 0x89, 0x9a, 0xf2, 0x71, 0xee, 0x6f, 0xc3, 0x45, 0x2f, - 0x55, 0x21, 0x23, 0xc4, 0x52, 0xbe, 0xd5, 0x65, 0x71, 0xab, 0xb3, 0xa0, 0x90, 0x10, 0x82, 0x42, - 0x79, 0x1b, 0x32, 0x2c, 0x03, 0x4e, 0x30, 0xab, 0xb3, 0x7a, 0x8d, 0x89, 0x89, 0xbd, 0xf9, 0x3c, - 0x6b, 0x63, 0x57, 0xc8, 0x57, 0x21, 0x8f, 0x82, 0xe5, 0x08, 0x16, 0x3a, 0x01, 0x9b, 0x98, 0xdc, - 0x7e, 0x3f, 0x05, 0x59, 0x77, 0xa5, 0xc8, 0x32, 0xa4, 0x59, 0x91, 0x84, 0xa6, 0xdc, 0x22, 0x3e, - 0x85, 0x65, 0x11, 0x59, 0x86, 0x0c, 0x2f, 0x84, 0x78, 0x74, 0xa7, 0x15, 0x7b, 0x9a, 0x15, 0x3e, - 0x5e, 0x67, 0x4d, 0xc7, 0xc0, 0xc4, 0xca, 0xf3, 0x34, 0x2b, 0x6d, 0x88, 0x0a, 0x39, 0xaf, 0x98, - 0xc1, 0x78, 0xcc, 0x6b, 0xf1, 0xac, 
0x5b, 0xbd, 0x08, 0x88, 0x9a, 0x8e, 0x11, 0x8b, 0x17, 0xde, - 0xd9, 0xae, 0x7f, 0x3c, 0xc9, 0xba, 0x25, 0x09, 0xde, 0xa1, 0xbb, 0x55, 0x76, 0x86, 0x17, 0x21, - 0x3e, 0xa0, 0xa6, 0x63, 0x48, 0x70, 0x4b, 0xea, 0x0c, 0x2f, 0x34, 0xc8, 0x55, 0xea, 0x22, 0x16, - 0x0e, 0xb8, 0xf5, 0xfd, 0xfa, 0x39, 0xcd, 0xca, 0x09, 0x72, 0x8d, 0x5a, 0x60, 0xd5, 0x01, 0xee, - 0x4b, 0xbf, 0x58, 0xce, 0xf0, 0xa2, 0x81, 0xdc, 0xa4, 0x10, 0xb6, 0xfc, 0x25, 0x88, 0xa8, 0x8c, - 0x33, 0xbc, 0x32, 0x26, 0x2a, 0x1d, 0x10, 0xc3, 0x03, 0x86, 0x04, 0xa1, 0x0a, 0x4e, 0xb3, 0x2a, - 0x98, 0x5c, 0x41, 0x73, 0x6c, 0x52, 0x05, 0xbf, 0xe2, 0xcd, 0xf0, 0x2a, 0xc3, 0xef, 0xc7, 0x23, - 0x9b, 0x57, 0xdd, 0x66, 0x78, 0x1d, 0x41, 0x6a, 0xf4, 0x7d, 0x51, 0x7d, 0x97, 0xe6, 0x31, 0x08, - 0x96, 0x7c, 0xe1, 0xb9, 0xef, 0x94, 0xc5, 0xc0, 0x3a, 0x8b, 0x20, 0x46, 0xaa, 0x8b, 0xbb, 0x61, - 0x89, 0xf2, 0x76, 0xfb, 0x56, 0xb7, 0x54, 0xc4, 0x95, 0x48, 0xf4, 0xad, 0xae, 0x91, 0xea, 0xd2, - 0x16, 0xa6, 0x81, 0x1d, 0xda, 0xa7, 0x60, 0x5f, 0xf2, 0x36, 0xeb, 0xa4, 0x4d, 0xa4, 0x04, 0xa9, - 0x8d, 0xd6, 0x4e, 0xdb, 0x2a, 0x2d, 0x30, 0x9e, 0xd5, 0xb6, 0x8c, 0x64, 0x77, 0xa7, 0x6d, 0x91, - 0xb7, 0x20, 0x31, 0x39, 0x3a, 0x28, 0x91, 0xf0, 0xe7, 0x8d, 0xbd, 0xa3, 0x03, 0xd7, 0x15, 0x83, - 0x22, 0xc8, 0x32, 0x64, 0x27, 0xce, 0xb8, 0xf5, 0x0b, 0xe6, 0xd8, 0x2e, 0x9d, 0xc7, 0x25, 0x3c, - 0x67, 0x64, 0x26, 0xce, 0xf8, 0x03, 0x73, 0x6c, 0x9f, 0x31, 0xf8, 0x95, 0xaf, 0x40, 0x5e, 0xb0, - 0x4b, 0x8a, 0x20, 0x59, 0xec, 0xa4, 0x50, 0x97, 0xee, 0x18, 0x92, 0x55, 0xde, 0x87, 0x82, 0x5b, - 0x48, 0xe0, 0x7c, 0x35, 0xba, 0x93, 0x06, 0xf6, 0x18, 0xf7, 0xe7, 0xbc, 0x76, 0x49, 0x4c, 0x51, - 0x3e, 0x8c, 0xa7, 0x0b, 0x06, 0x2d, 0x2b, 0x21, 0x57, 0xa4, 0xf2, 0x0f, 0x25, 0x28, 0x6c, 0xdb, - 0x63, 0xff, 0x96, 0x77, 0x11, 0x52, 0x07, 0xb6, 0x3d, 0x98, 0xa0, 0xd9, 0xac, 0xc1, 0x1e, 0xc8, - 0x1b, 0x50, 0xc0, 0x1f, 0x6e, 0x01, 0x28, 0x7b, 0xf7, 0x0b, 0x79, 0x6c, 0xe7, 0x55, 0x1f, 0x81, - 0x64, 0xdf, 0x72, 0x26, 0x3c, 0x92, 0xe1, 0x6f, 0xf2, 0x05, 0xc8, 0xd3, 
0xbf, 0x2e, 0x33, 0xe9, - 0x1d, 0x58, 0x81, 0x36, 0x73, 0xe2, 0x5b, 0x30, 0x87, 0x6f, 0xdf, 0x83, 0x65, 0xbc, 0xbb, 0x84, - 0x02, 0xeb, 0xe0, 0xc0, 0x12, 0x64, 0x58, 0x28, 0x98, 0xe0, 0x27, 0xab, 0x9c, 0xe1, 0x3e, 0xd2, - 0xf0, 0x8a, 0x95, 0x00, 0x4b, 0xf7, 0x19, 0x83, 0x3f, 0x95, 0x1f, 0x40, 0x16, 0xb3, 0x54, 0x73, - 0xd0, 0x21, 0x65, 0x90, 0x7a, 0x25, 0x13, 0x73, 0xe4, 0xa2, 0x70, 0xcc, 0xe7, 0xdd, 0x2b, 0x9b, - 0x86, 0xd4, 0x5b, 0x5a, 0x00, 0x69, 0x93, 0x9e, 0xbb, 0x8f, 0x79, 0x98, 0x96, 0x8e, 0xcb, 0x4d, - 0x6e, 0x62, 0xc7, 0x7c, 0x19, 0x67, 0x62, 0xc7, 0x7c, 0xc9, 0x4c, 0x5c, 0x9d, 0x32, 0x41, 0x9f, - 0x4e, 0xf8, 0xf7, 0x3b, 0xe9, 0x84, 0x9e, 0xf3, 0x71, 0x7b, 0xf6, 0xad, 0xde, 0xae, 0xdd, 0xb7, - 0xf0, 0x9c, 0xdf, 0xc5, 0x73, 0x92, 0x64, 0x48, 0xdd, 0xf2, 0x67, 0x49, 0x98, 0xe7, 0x41, 0xf4, - 0xfd, 0xbe, 0xf3, 0x6c, 0xbb, 0x3d, 0x22, 0x4f, 0xa1, 0x40, 0xe3, 0x67, 0x6b, 0xd8, 0x1e, 0x8d, - 0xe8, 0x46, 0x95, 0xf0, 0x50, 0x71, 0x7d, 0x2a, 0x28, 0x73, 0xfc, 0xca, 0x4e, 0x7b, 0x68, 0x6e, - 0x33, 0x6c, 0xc3, 0x72, 0xc6, 0x27, 0x46, 0xde, 0xf2, 0x5b, 0xc8, 0x16, 0xe4, 0x87, 0x93, 0x9e, - 0x67, 0x4c, 0x46, 0x63, 0x95, 0x48, 0x63, 0xdb, 0x93, 0x5e, 0xc0, 0x16, 0x0c, 0xbd, 0x06, 0xea, - 0x18, 0x8d, 0xbc, 0x9e, 0xad, 0xc4, 0x29, 0x8e, 0xd1, 0x20, 0x11, 0x74, 0xec, 0xc0, 0x6f, 0x21, - 0x8f, 0x01, 0xe8, 0x46, 0x72, 0x6c, 0x5a, 0x24, 0xa1, 0x56, 0xf2, 0xda, 0x9b, 0x91, 0xb6, 0xf6, - 0x9c, 0xf1, 0xbe, 0xbd, 0xe7, 0x8c, 0x99, 0x21, 0xba, 0x05, 0xf1, 0x71, 0xe9, 0x1d, 0x50, 0xc2, - 0xf3, 0x17, 0xcf, 0xde, 0xa9, 0x19, 0x67, 0xef, 0x1c, 0x3f, 0x7b, 0xd7, 0xe5, 0xbb, 0xd2, 0xd2, - 0x7b, 0x50, 0x0c, 0x4d, 0x59, 0xa4, 0x13, 0x46, 0xbf, 0x2d, 0xd2, 0xf3, 0xda, 0xeb, 0xc2, 0xd7, - 0x63, 0xf1, 0xd5, 0x8a, 0x76, 0xdf, 0x01, 0x25, 0x3c, 0x7d, 0xd1, 0x70, 0x36, 0xa6, 0x26, 0x40, - 0xfe, 0x7d, 0x98, 0x0b, 0x4c, 0x59, 0x24, 0xe7, 0x4e, 0x99, 0x54, 0xf9, 0x97, 0x52, 0x90, 0x6a, - 0x5a, 0xa6, 0xdd, 0x25, 0xaf, 0x07, 0x33, 0xe2, 0x93, 0x73, 0x6e, 0x36, 0xbc, 0x18, 0xca, 0x86, - 0x4f, 0xce, 
0x79, 0xb9, 0xf0, 0x62, 0x28, 0x17, 0xba, 0x5d, 0x35, 0x9d, 0x5c, 0x9e, 0xca, 0x84, - 0x4f, 0xce, 0x09, 0x69, 0xf0, 0xf2, 0x54, 0x1a, 0xf4, 0xbb, 0x6b, 0x3a, 0x0d, 0x9d, 0xc1, 0x1c, - 0xf8, 0xe4, 0x9c, 0x9f, 0xff, 0x96, 0xc3, 0xf9, 0xcf, 0xeb, 0xac, 0xe9, 0xcc, 0x25, 0x21, 0xf7, - 0xa1, 0x4b, 0x2c, 0xeb, 0x2d, 0x87, 0xb3, 0x1e, 0xf2, 0x78, 0xbe, 0x5b, 0x0e, 0xe7, 0x3b, 0xec, - 0xe4, 0xf9, 0xed, 0x62, 0x28, 0xbf, 0xa1, 0x51, 0x96, 0xd8, 0x96, 0xc3, 0x89, 0x8d, 0xf1, 0x04, - 0x4f, 0xc5, 0xac, 0xe6, 0x75, 0xd6, 0x74, 0xa2, 0x85, 0x52, 0x5a, 0xf4, 0xb9, 0x1e, 0xdf, 0x05, - 0x86, 0x77, 0x9d, 0x2e, 0x9b, 0x7b, 0xe4, 0x2c, 0xc6, 0x7c, 0x60, 0xc7, 0xd5, 0x74, 0x8f, 0x5c, - 0x1a, 0x64, 0xba, 0xbc, 0xd4, 0x55, 0x30, 0x46, 0x09, 0xb2, 0xc4, 0x97, 0xbf, 0xb2, 0xd1, 0xc2, - 0x58, 0x85, 0xf3, 0x62, 0xa7, 0xf7, 0x0a, 0xcc, 0x6d, 0xb4, 0x9e, 0xb6, 0xc7, 0x3d, 0x73, 0xe2, - 0xb4, 0xf6, 0xdb, 0x3d, 0xef, 0xba, 0x80, 0xbe, 0xff, 0x7c, 0x97, 0xf7, 0xec, 0xb7, 0x7b, 0xe4, - 0x82, 0x2b, 0xae, 0x0e, 0xf6, 0x4a, 0x5c, 0x5e, 0x4b, 0xaf, 0xd3, 0x45, 0x63, 0xc6, 0x30, 0xea, - 0x2d, 0xf0, 0xa8, 0xf7, 0x30, 0x03, 0xa9, 0x23, 0xab, 0x6f, 0x5b, 0x0f, 0x73, 0x90, 0x71, 0xec, - 0xf1, 0xb0, 0xed, 0xd8, 0xe5, 0x1f, 0x49, 0x00, 0x8f, 0xec, 0xe1, 0xf0, 0xc8, 0xea, 0xbf, 0x38, - 0x32, 0xc9, 0x15, 0xc8, 0x0f, 0xdb, 0xcf, 0xcd, 0xd6, 0xd0, 0x6c, 0x1d, 0x8e, 0xdd, 0x7d, 0x90, - 0xa3, 0x4d, 0xdb, 0xe6, 0xa3, 0xf1, 0x09, 0x29, 0xb9, 0x87, 0x71, 0xd4, 0x0e, 0x4a, 0x92, 0x1f, - 0xce, 0x17, 0xf9, 0xf1, 0x32, 0xcd, 0xdf, 0xa1, 0x7b, 0xc0, 0x64, 0x15, 0x43, 0x86, 0xbf, 0x3d, - 0x7c, 0xa2, 0x92, 0x77, 0xcc, 0xe1, 0xa8, 0x75, 0x88, 0x52, 0xa1, 0x72, 0x48, 0xd1, 0xe7, 0x47, - 0xe4, 0x36, 0x24, 0x0e, 0xed, 0x01, 0x8a, 0xe4, 0x94, 0xf7, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, - 0x27, 0x4c, 0x36, 0x79, 0x6d, 0x41, 0x38, 0x11, 0xb0, 0x24, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, - 0xf7, 0x8d, 0x22, 0x24, 0x36, 0x9a, 0x4d, 0x9a, 0xe5, 0x37, 0x9a, 0xcd, 0x35, 0x45, 0xaa, 0x7f, - 0x09, 0xb2, 0xbd, 0xb1, 0x69, 0xd2, 0xf0, 0x30, 
0xbb, 0xba, 0xf8, 0x10, 0xb3, 0x9a, 0x07, 0xaa, - 0x6f, 0x43, 0xe6, 0x90, 0xd5, 0x17, 0x24, 0xa2, 0x80, 0x2d, 0xfd, 0x21, 0xbb, 0x3e, 0x59, 0xf2, - 0xbb, 0xc3, 0x15, 0x89, 0xe1, 0xda, 0xa8, 0xef, 0x42, 0x6e, 0xdc, 0x3a, 0xcd, 0xe0, 0xc7, 0x2c, - 0xbb, 0xc4, 0x19, 0xcc, 0x8e, 0x79, 0x53, 0xbd, 0x01, 0x0b, 0x96, 0xed, 0x7e, 0xb2, 0x68, 0x75, - 0xd8, 0x1e, 0xbb, 0x38, 0x7d, 0x68, 0x73, 0x8d, 0x9b, 0xec, 0x33, 0xa1, 0x65, 0xf3, 0x0e, 0xb6, - 0x2b, 0xeb, 0x8f, 0x40, 0x11, 0xcc, 0x60, 0x91, 0x19, 0x67, 0xa5, 0xcb, 0xbe, 0x4b, 0x7a, 0x56, - 0x70, 0xdf, 0x87, 0x8c, 0xb0, 0x9d, 0x19, 0x63, 0xa4, 0xc7, 0x3e, 0xf2, 0x7a, 0x46, 0x30, 0xd4, - 0x4d, 0x1b, 0xa1, 0xb1, 0x26, 0xda, 0xc8, 0x33, 0xf6, 0xfd, 0x57, 0x34, 0x52, 0xd3, 0x43, 0xab, - 0x72, 0x74, 0xaa, 0x2b, 0x7d, 0xf6, 0xf9, 0xd6, 0xb3, 0xc2, 0x02, 0xe0, 0x0c, 0x33, 0xf1, 0xce, - 0x7c, 0xc8, 0xbe, 0xec, 0x06, 0xcc, 0x4c, 0x79, 0x33, 0x39, 0xd5, 0x9b, 0xe7, 0xec, 0x33, 0xaa, - 0x67, 0x66, 0x6f, 0x96, 0x37, 0x93, 0x53, 0xbd, 0x19, 0xb0, 0x0f, 0xac, 0x01, 0x33, 0x35, 0xbd, - 0xbe, 0x09, 0x44, 0x7c, 0xd5, 0x3c, 0x4f, 0xc4, 0xd8, 0x19, 0xb2, 0xcf, 0xe6, 0xfe, 0xcb, 0x66, - 0x94, 0x59, 0x86, 0xe2, 0x1d, 0xb2, 0xd8, 0x17, 0xf5, 0xa0, 0xa1, 0x9a, 0x5e, 0xdf, 0x82, 0xf3, - 0xe2, 0xc4, 0xce, 0xe0, 0x92, 0xad, 0x4a, 0x95, 0xa2, 0xb1, 0xe0, 0x4f, 0x8d, 0x73, 0x66, 0x9a, - 0x8a, 0x77, 0x6a, 0xa4, 0x4a, 0x15, 0x65, 0xca, 0x54, 0x4d, 0xaf, 0x3f, 0x80, 0xa2, 0x60, 0xea, - 0x00, 0x33, 0x74, 0xb4, 0x99, 0x17, 0xec, 0x5f, 0x1b, 0x3c, 0x33, 0x34, 0xa3, 0x87, 0xdf, 0x18, - 0xcf, 0x71, 0xd1, 0x46, 0xc6, 0xec, 0xbb, 0xbc, 0xef, 0x0b, 0x32, 0x42, 0x5b, 0x02, 0x2b, 0xed, - 0x38, 0x2b, 0x13, 0xf6, 0xc5, 0xde, 0x77, 0x85, 0x12, 0xea, 0xfd, 0xc0, 0x74, 0x4c, 0x9a, 0xe4, - 0x62, 0x6c, 0x38, 0x18, 0x91, 0xdf, 0x8c, 0x04, 0xac, 0x88, 0x57, 0x21, 0xc2, 0xb4, 0xe9, 0x63, - 0x7d, 0x0b, 0xe6, 0xcf, 0x1e, 0x90, 0x3e, 0x96, 0x58, 0x5d, 0x5c, 0x5d, 0xa1, 0xa5, 0xb3, 0x31, - 0xd7, 0x09, 0xc4, 0xa5, 0x06, 0xcc, 0x9d, 0x39, 0x28, 0x7d, 0x22, 0xb1, 0xea, 0x92, 
0x5a, 0x32, - 0x0a, 0x9d, 0x60, 0x64, 0x9a, 0x3b, 0x73, 0x58, 0xfa, 0x54, 0x62, 0x57, 0x11, 0xba, 0xe6, 0x19, - 0x71, 0x23, 0xd3, 0xdc, 0x99, 0xc3, 0xd2, 0x57, 0x59, 0xed, 0x28, 0xeb, 0x55, 0xd1, 0x08, 0xc6, - 0x82, 0xf9, 0xb3, 0x87, 0xa5, 0xaf, 0x49, 0x78, 0x2d, 0x21, 0xeb, 0xba, 0xb7, 0x2e, 0x5e, 0x64, - 0x9a, 0x3f, 0x7b, 0x58, 0xfa, 0xba, 0x84, 0x97, 0x17, 0xb2, 0xbe, 0x1e, 0x30, 0x13, 0xf4, 0xe6, - 0xf4, 0xb0, 0xf4, 0x0d, 0x09, 0xef, 0x13, 0x64, 0xbd, 0xe6, 0x99, 0xd9, 0x9b, 0xf2, 0xe6, 0xf4, - 0xb0, 0xf4, 0x4d, 0x3c, 0xc5, 0xd7, 0x65, 0xfd, 0x4e, 0xc0, 0x0c, 0x46, 0xa6, 0xe2, 0x2b, 0x84, - 0xa5, 0x6f, 0x49, 0x78, 0xed, 0x23, 0xeb, 0x77, 0x0d, 0x77, 0x74, 0x3f, 0x32, 0x15, 0x5f, 0x21, - 0x2c, 0x7d, 0x26, 0xe1, 0xed, 0x90, 0xac, 0xdf, 0x0b, 0x1a, 0xc2, 0xc8, 0xa4, 0xbc, 0x4a, 0x58, - 0xfa, 0x36, 0xb5, 0x54, 0xac, 0xcb, 0xeb, 0xab, 0x86, 0xeb, 0x80, 0x10, 0x99, 0x94, 0x57, 0x09, - 0x4b, 0xdf, 0xa1, 0xa6, 0x94, 0xba, 0xbc, 0xbe, 0x16, 0x32, 0x55, 0xd3, 0xeb, 0x8f, 0xa0, 0x70, - 0xd6, 0xb0, 0xf4, 0x5d, 0xf1, 0xd6, 0x2d, 0xdf, 0x11, 0x62, 0xd3, 0xae, 0xf0, 0xce, 0x4e, 0x0d, - 0x4c, 0xdf, 0xc3, 0x1a, 0xa7, 0x3e, 0xf7, 0x84, 0xdd, 0x4c, 0x31, 0x82, 0xff, 0xfa, 0x58, 0x98, - 0xda, 0xf6, 0xf7, 0xc7, 0xa9, 0x31, 0xea, 0xfb, 0x12, 0x5e, 0x5f, 0x15, 0xb8, 0x41, 0xc4, 0x7b, - 0x3b, 0x85, 0x05, 0xac, 0x0f, 0xfd, 0x59, 0x9e, 0x16, 0xad, 0x7e, 0x20, 0xbd, 0x4a, 0xb8, 0xaa, - 0x27, 0x9a, 0x3b, 0x0d, 0x6f, 0x31, 0xb0, 0xe5, 0x6d, 0x48, 0x1e, 0x6b, 0xab, 0x6b, 0xe2, 0x91, - 0x4c, 0xbc, 0xb5, 0x65, 0x41, 0x2a, 0xaf, 0x15, 0x85, 0x8b, 0xed, 0xe1, 0xc8, 0x39, 0x31, 0x90, - 0xc5, 0xd9, 0x5a, 0x24, 0xfb, 0x93, 0x18, 0xb6, 0xc6, 0xd9, 0xd5, 0x48, 0xf6, 0xa7, 0x31, 0xec, - 0x2a, 0x67, 0xeb, 0x91, 0xec, 0xaf, 0xc6, 0xb0, 0x75, 0xce, 0x5e, 0x8f, 0x64, 0x7f, 0x2d, 0x86, - 0xbd, 0xce, 0xd9, 0xb5, 0x48, 0xf6, 0xd7, 0x63, 0xd8, 0x35, 0xce, 0xbe, 0x13, 0xc9, 0xfe, 0x46, - 0x0c, 0xfb, 0x0e, 0x67, 0xdf, 0x8d, 0x64, 0x7f, 0x33, 0x86, 0x7d, 0x97, 0xb3, 0xef, 0x45, 0xb2, - 0xbf, 0x15, 0xc3, 0xbe, 
0xc7, 0xd8, 0x6b, 0xab, 0x91, 0xec, 0xcf, 0xa2, 0xd9, 0x6b, 0xab, 0x9c, - 0x1d, 0xad, 0xb5, 0x6f, 0xc7, 0xb0, 0xb9, 0xd6, 0xd6, 0xa2, 0xb5, 0xf6, 0x9d, 0x18, 0x36, 0xd7, - 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x1b, 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0xf7, 0x62, 0xd8, - 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x7e, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x3f, 0x88, - 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, - 0xe3, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xfe, 0x24, 0x86, 0xcd, 0xb5, 0xa6, 0x45, 0x6b, - 0xed, 0x4f, 0xa3, 0xd9, 0x1a, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x3f, 0x8b, 0x61, 0x73, 0xad, 0x69, - 0xd1, 0x5a, 0xfb, 0xf3, 0x18, 0x36, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x1f, 0xc6, 0xb0, 0xb9, 0xd6, - 0xb4, 0x68, 0xad, 0xfd, 0x45, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc6, 0xb0, 0xb9, - 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x55, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc7, 0xb0, - 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x4d, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0xdf, 0xc6, - 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xbb, 0x68, 0x76, 0x95, 0x6b, 0xad, 0x1a, 0xad, 0xb5, - 0xbf, 0x8f, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0x0f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, - 0xd6, 0xfe, 0x31, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0xd5, - 0x68, 0xad, 0xfd, 0x53, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, 0xb5, 0x7f, 0x8e, 0x61, 0x73, 0xad, - 0x55, 0xa3, 0xb5, 0xf6, 0x2f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x35, 0x86, 0xcd, - 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc5, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x7b, 0x34, - 0x5b, 0xe7, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x1f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0xff, - 0x19, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x5f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, - 0xff, 0x1d, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x3f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, - 0x6b, 0x3f, 0x8e, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, 0xfb, 
0x49, 0x0c, 0x9b, 0x6b, 0x4d, 0x8f, - 0xd6, 0xda, 0xff, 0xc6, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x5f, 0x0c, 0x9b, 0x6b, 0x6d, - 0x3d, 0x5a, 0x6b, 0xff, 0x1f, 0xcd, 0x5e, 0x5f, 0xfd, 0x69, 0x00, 0x00, 0x00, 0xff, 0xff, 0x81, - 0x23, 0xc6, 0xe6, 0xc6, 0x38, 0x00, 0x00, -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto deleted file mode 100644 index f6071136..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto +++ /dev/null @@ -1,540 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -package testdata; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. 
- required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - optional int32 
F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - - // Packed repeated fields (no string or bytes). - repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - - // Required, repeated, and optional groups. - required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. 
-message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. 
- -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; - - extensions 100 to max; -} - -message RequiredInnerMessage { - required InnerMessage leo_finally_won_an_oscar = 1; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - optional RequiredInnerMessage we_must_go_deeper = 13; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. - repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; -} - -extend MyMessage { - repeated string greeting = 106; -} - -message ComplexExtension { - optional int32 first = 1; - optional int32 second = 2; - repeated int32 third = 3; -} - -extend OtherMessage { - optional ComplexExtension complex = 200; - repeated ComplexExtension r_complex = 201; -} - -message DefaultsMessage { - enum DefaultsEnum { - ZERO = 0; - ONE = 1; - TWO = 2; - }; - extensions 100 to max; -} - -extend DefaultsMessage { - optional double no_default_double = 101; - optional float no_default_float = 102; - optional int32 no_default_int32 = 103; - optional int64 no_default_int64 = 104; - optional uint32 no_default_uint32 = 105; - optional uint64 no_default_uint64 = 106; - optional sint32 no_default_sint32 = 107; - optional sint64 no_default_sint64 = 108; - optional fixed32 no_default_fixed32 = 109; 
- optional fixed64 no_default_fixed64 = 110; - optional sfixed32 no_default_sfixed32 = 111; - optional sfixed64 no_default_sfixed64 = 112; - optional bool no_default_bool = 113; - optional string no_default_string = 114; - optional bytes no_default_bytes = 115; - optional DefaultsMessage.DefaultsEnum no_default_enum = 116; - - optional double default_double = 201 [default = 3.1415]; - optional float default_float = 202 [default = 3.14]; - optional int32 default_int32 = 203 [default = 42]; - optional int64 default_int64 = 204 [default = 43]; - optional uint32 default_uint32 = 205 [default = 44]; - optional uint64 default_uint64 = 206 [default = 45]; - optional sint32 default_sint32 = 207 [default = 46]; - optional sint64 default_sint64 = 208 [default = 47]; - optional fixed32 default_fixed32 = 209 [default = 48]; - optional fixed64 default_fixed64 = 210 [default = 49]; - optional sfixed32 default_sfixed32 = 211 [default = 50]; - optional sfixed64 default_sfixed64 = 212 [default = 51]; - optional bool default_bool = 213 [default = true]; - optional string default_string = 214 [default = "Hello, string"]; - optional bytes default_bytes = 215 [default = "Hello, bytes"]; - optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - optional Empty x220 = 220; - optional 
Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. 
- optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. - optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. - -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; -} - -message MessageWithMap { - map name_mapping = 1; - map msg_mapping = 2; - map byte_mapping = 3; - map str_to_str = 4; -} - -message Oneof { - oneof union { - bool F_Bool = 1; - int32 F_Int32 = 2; - int64 F_Int64 = 3; - fixed32 F_Fixed32 = 4; - fixed64 F_Fixed64 = 5; - uint32 F_Uint32 = 6; - uint64 F_Uint64 = 7; - float F_Float = 8; - double F_Double = 9; - string F_String = 10; - bytes F_Bytes = 11; - sint32 F_Sint32 = 12; - sint64 F_Sint64 = 13; - MyMessage.Color F_Enum = 14; - GoTestField F_Message = 15; - group F_Group = 16 { - optional int32 x = 17; - } - int32 F_Largest_Tag = 536870911; - } - - oneof tormato { - int32 value = 100; - } -} - -message Communique { - optional bool make_me_cry = 1; - - // This is a oneof, called "union". 
- oneof union { - int32 number = 5; - string name = 6; - bytes data = 7; - double temp_c = 8; - MyMessage.Color col = 9; - Strings msg = 10; - } -} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore index e48bab32..5e987350 100644 --- a/vendor/github.com/pierrec/lz4/.gitignore +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -30,4 +30,5 @@ Temporary Items # End of https://www.gitignore.io/api/macos -lz4c/lz4c +cmd/*/*exe +.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml index 204afe1d..fd6c6db7 100644 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -2,12 +2,12 @@ language: go env: - GO111MODULE=off - - GO111MODULE=on go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x - master matrix: @@ -20,3 +20,5 @@ sudo: false script: - go test -v -cpu=2 - go test -v -cpu=2 -race + - go test -v -cpu=2 -tags noasm + - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md index e71ebd59..4ee388e8 100644 --- a/vendor/github.com/pierrec/lz4/README.md +++ b/vendor/github.com/pierrec/lz4/README.md @@ -1,24 +1,90 @@ -[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) +# lz4 : LZ4 compression in pure Go -# lz4 -LZ4 compression and decompression in pure Go. 
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) +[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) +[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) -## Usage +## Overview + +This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. +The implementation is based on the reference C [one](https://github.com/lz4/lz4). + +## Install + +Assuming you have the go toolchain installed: + +``` +go get github.com/pierrec/lz4 +``` + +There is a command line interface tool to compress and decompress LZ4 files. + +``` +go install github.com/pierrec/lz4/cmd/lz4c +``` + +Usage + +``` +Usage of lz4c: + -version + print the program version + +Subcommands: +Compress the given files or from stdin to stdout. +compress [arguments] [ ...] + -bc + enable block checksum + -l int + compression level (0=fastest) + -sc + disable stream checksum + -size string + block max size [64K,256K,1M,4M] (default "4M") + +Uncompress the given files or from stdin to stdout. +uncompress [arguments] [ ...] -```go -import "github.com/pierrec/lz4/v2" ``` -## Description -Package lz4 implements reading and writing lz4 compressed data (a frame), -as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. -This package is **compatible with the LZ4 frame format** although the block level compression -and decompression functions are exposed and are fully compatible with the lz4 block format -definition, they are low level and should not be used directly. 
+## Example + +``` +// Compress and uncompress an input string. +s := "hello world" +r := strings.NewReader(s) + +// The pipe will uncompress the data from the writer. +pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors -For a complete description of an lz4 compressed block, see: -http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! -See https://github.com/Cyan4973/lz4 for the reference C implementation. +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. 
diff --git a/vendor/github.com/pierrec/lz4/bench_test.go b/vendor/github.com/pierrec/lz4/bench_test.go deleted file mode 100644 index 72f1eaae..00000000 --- a/vendor/github.com/pierrec/lz4/bench_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package lz4_test - -import ( - "bytes" - "io" - "io/ioutil" - "testing" - - "github.com/pierrec/lz4" -) - -func BenchmarkCompress(b *testing.B) { - var hashTable [1 << 16]int - buf := make([]byte, len(pg1661)) - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - lz4.CompressBlock(pg1661, buf, hashTable[:]) - } -} - -func BenchmarkCompressHC(b *testing.B) { - buf := make([]byte, len(pg1661)) - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - lz4.CompressBlockHC(pg1661, buf, 16) - } -} - -func BenchmarkUncompress(b *testing.B) { - buf := make([]byte, len(pg1661)) - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - lz4.UncompressBlock(pg1661LZ4, buf) - } -} - -func mustLoadFile(f string) []byte { - b, err := ioutil.ReadFile(f) - if err != nil { - panic(err) - } - return b -} - -var ( - pg1661 = mustLoadFile("testdata/pg1661.txt") - digits = mustLoadFile("testdata/e.txt") - twain = mustLoadFile("testdata/Mark.Twain-Tom.Sawyer.txt") - random = mustLoadFile("testdata/random.data") - pg1661LZ4 = mustLoadFile("testdata/pg1661.txt.lz4") - digitsLZ4 = mustLoadFile("testdata/e.txt.lz4") - twainLZ4 = mustLoadFile("testdata/Mark.Twain-Tom.Sawyer.txt.lz4") - randomLZ4 = mustLoadFile("testdata/random.data.lz4") -) - -func benchmarkUncompress(b *testing.B, compressed []byte) { - r := bytes.NewReader(compressed) - zr := lz4.NewReader(r) - - // Determine the uncompressed size of testfile. 
- uncompressedSize, err := io.Copy(ioutil.Discard, zr) - if err != nil { - b.Fatal(err) - } - - b.SetBytes(uncompressedSize) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - r.Reset(compressed) - zr.Reset(r) - io.Copy(ioutil.Discard, zr) - } -} - -func BenchmarkUncompressPg1661(b *testing.B) { benchmarkUncompress(b, pg1661LZ4) } -func BenchmarkUncompressDigits(b *testing.B) { benchmarkUncompress(b, digitsLZ4) } -func BenchmarkUncompressTwain(b *testing.B) { benchmarkUncompress(b, twainLZ4) } -func BenchmarkUncompressRand(b *testing.B) { benchmarkUncompress(b, randomLZ4) } - -func benchmarkCompress(b *testing.B, uncompressed []byte) { - w := bytes.NewBuffer(nil) - zw := lz4.NewWriter(w) - r := bytes.NewReader(uncompressed) - - // Determine the compressed size of testfile. - compressedSize, err := io.Copy(zw, r) - if err != nil { - b.Fatal(err) - } - if err := zw.Close(); err != nil { - b.Fatal(err) - } - - b.SetBytes(compressedSize) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - r.Reset(uncompressed) - zw.Reset(w) - io.Copy(zw, r) - } -} - -func BenchmarkCompressPg1661(b *testing.B) { benchmarkCompress(b, pg1661) } -func BenchmarkCompressDigits(b *testing.B) { benchmarkCompress(b, digits) } -func BenchmarkCompressTwain(b *testing.B) { benchmarkCompress(b, twain) } -func BenchmarkCompressRand(b *testing.B) { benchmarkCompress(b, random) } diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go index 00b1111b..664d9be5 100644 --- a/vendor/github.com/pierrec/lz4/block.go +++ b/vendor/github.com/pierrec/lz4/block.go @@ -2,21 +2,14 @@ package lz4 import ( "encoding/binary" - "errors" + "math/bits" + "sync" ) -var ( - // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed - // block is corrupted or the destination buffer is not large enough for the uncompressed data. 
- ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") - // ErrInvalid is returned when reading an invalid LZ4 archive. - ErrInvalid = errors.New("lz4: bad magic number") -) - -// blockHash hashes 4 bytes into a value < winSize. -func blockHash(x uint32) uint32 { - const hasher uint32 = 2654435761 // Knuth multiplicative hash. - return x * hasher >> hashShift +// blockHash hashes the lower 6 bytes into a value < htSize. +func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) } // CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. @@ -30,137 +23,127 @@ func CompressBlockBound(n int) int { // The destination buffer must be sized appropriately. // // An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte) (si int, err error) { - defer func() { - // It is now faster to let the runtime panic and recover on out of bound slice access - // than checking indices as we go along. - if recover() != nil { - err = ErrInvalidSourceShortBuffer - } - }() - sn := len(src) - if sn == 0 { +func UncompressBlock(src, dst []byte) (int, error) { + if len(src) == 0 { return 0, nil } - var di int - - for { - // Literals and match lengths (token). - b := int(src[si]) - si++ - - // Literals. - if lLen := b >> 4; lLen > 0 { - if lLen == 0xF { - for src[si] == 0xFF { - lLen += 0xFF - si++ - } - lLen += int(src[si]) - si++ - } - i := si - si += lLen - di += copy(dst[di:], src[i:si]) - - if si >= sn { - return di, nil - } - } - - si++ - _ = src[si] // Bound check elimination. - offset := int(src[si-1]) | int(src[si])<<8 - si++ - - // Match. - mLen := b & 0xF - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - si++ - } - mLen += int(src[si]) - si++ - } - mLen += minMatch - - // Copy the match. 
- i := di - offset - if offset > 0 && mLen >= offset { - // Efficiently copy the match dst[di-offset:di] into the dst slice. - bytesToCopy := offset * (mLen / offset) - expanded := dst[i:] - for n := offset; n <= bytesToCopy+offset; n *= 2 { - copy(expanded[n:], expanded[:n]) - } - di += bytesToCopy - mLen -= bytesToCopy - } - di += copy(dst[di:], dst[i:i+mLen]) + if di := decodeBlock(dst, src); di >= 0 { + return di, nil } + return 0, ErrInvalidSourceShortBuffer } // CompressBlock compresses the source buffer into the destination one. // This is the fast version of LZ4 compression and also the default one. -// The size of hashTable must be at least 64Kb. // -// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { - defer func() { - if recover() != nil { - err = ErrInvalidSourceShortBuffer - } - }() - - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 { - return 0, nil +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. 
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + if len(hashTable) < htSize { + htIface := htPool.Get() + defer htPool.Put(htIface) + hashTable = (*(htIface).(*[htSize]int))[:] + } + // Prove to the compiler the table has at least htSize elements. + // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. + hashTable = hashTable[:htSize] + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals } - var si int // Fast scan strategy: the hash table only stores the last 4 bytes sequences. - // const accInit = 1 << skipStrength - - anchor := si // Position of the current literals. - // acc := accInit // Variable step: improves performance on non-compressible data. - for si < sn { - // Hash the next 4 bytes (sequence)... - match := binary.LittleEndian.Uint32(src[si:]) + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) h := blockHash(match) + h2 := blockHash(match >> 8) + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. ref := hashTable[h] + ref2 := hashTable[h2] hashTable[h] = si - if ref >= sn { // Invalid reference (dirty hashtable). - si++ - continue - } + hashTable[h2] = si + 1 offset := si - ref + + // If offset <= 0 we got an old entry in the hash table. if offset <= 0 || offset >= winSize || // Out of window. - match != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. - // si += acc >> skipStrength - // acc++ - si++ - continue + uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. 
+ h = blockHash(match >> 16) + ref = hashTable[h] + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || + uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref + hashTable[h] = si + + if offset <= 0 || offset >= winSize || + uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } } // Match found. - // acc = accInit lLen := si - anchor // Literal length. - - // Encode match length part 1. - si += minMatch - mLen := si // Match length has minMatch already. - // Find the longest match, first looking by batches of 8 bytes. - for si < sn && binary.LittleEndian.Uint64(src[si:]) == binary.LittleEndian.Uint64(src[si-offset:]) { - si += 8 + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ } - // Then byte by byte. - for si < sn && src[si] == src[si-offset] { - si++ + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 < sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } } mLen = si - mLen @@ -186,7 +169,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { di++ // Literals. 
- copy(dst[di:], src[anchor:anchor+lLen]) + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) di += lLen + 2 anchor = si @@ -203,9 +186,17 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { dst[di] = byte(mLen) di++ } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + hashTable[h] = si - 2 } - if anchor == 0 { +lastLiterals: + if isNotCompressible && anchor == 0 { // Incompressible. return 0, nil } @@ -226,48 +217,68 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { di++ // Write the last literals. - if di >= anchor { + if isNotCompressible && di >= anchor { // Incompressible. return 0, nil } - di += copy(dst[di:], src[anchor:]) + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) return di, nil } +// Pool of hash tables for CompressBlock. +var htPool = sync.Pool{ + New: func() interface{} { + return new([htSize]int) + }, +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + // CompressBlockHC compresses the source buffer src into the destination dst // with max search depth (use 0 or negative value for no max). // // CompressBlockHC compression ratio is better than CompressBlock but it is also slower. // -// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. 
-func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { - defer func() { - if recover() != nil { - err = ErrInvalidSourceShortBuffer - } - }() +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { + defer recoverBlock(&err) - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 { - return 0, nil - } - var si int + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + var si, di, anchor int // hashTable: stores the last position found for a given hash - // chaingTable: stores previous positions for a given hash + // chainTable: stores previous positions for a given hash var hashTable, chainTable [winSize]int if depth <= 0 { depth = winSize } - anchor := si + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + for si < sn { // Hash the next 4 bytes (sequence). match := binary.LittleEndian.Uint32(src[si:]) - h := blockHash(match) + h := blockHashHC(match) // Follow the chain until out of window and give the longest match. mLen := 0 @@ -280,11 +291,15 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { } ml := 0 // Compare the current position with a previous with the same hash. - for ml < sn-si && binary.LittleEndian.Uint64(src[next+ml:]) == binary.LittleEndian.Uint64(src[si+ml:]) { - ml += 8 - } - for ml < sn-si && src[next+ml] == src[si+ml] { - ml++ + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop is first non-zero byte. 
+ ml += bits.TrailingZeros64(x) >> 3 + break + } } if ml < minMatch || ml <= mLen { // Match too small (>adaptSkipLog continue } @@ -315,7 +330,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { for si, ml := winStart, si+mLen; si < ml; { match >>= 8 match |= uint32(src[si+3]) << 24 - h := blockHash(match) + h := blockHashHC(match) chainTable[si&winMask] = hashTable[h] hashTable[h] = si si++ @@ -347,7 +362,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { di++ // Literals. - copy(dst[di:], src[anchor:anchor+lLen]) + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) di += lLen anchor = si @@ -366,12 +381,13 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { } } - if anchor == 0 { + if isNotCompressible && anchor == 0 { // Incompressible. return 0, nil } // Last literals. +lastLiterals: lLen := len(src) - anchor if lLen < 0xF { dst[di] = byte(lLen << 4) @@ -388,10 +404,10 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { di++ // Write the last literals. - if di >= anchor { + if isNotCompressible && di >= anchor { // Incompressible. 
return 0, nil } - di += copy(dst[di:], src[anchor:]) + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) return di, nil } diff --git a/vendor/github.com/pierrec/lz4/block_test.go b/vendor/github.com/pierrec/lz4/block_test.go deleted file mode 100644 index 312160bb..00000000 --- a/vendor/github.com/pierrec/lz4/block_test.go +++ /dev/null @@ -1,98 +0,0 @@ -//+build go1.9 - -package lz4_test - -import ( - "fmt" - "io/ioutil" - "reflect" - "testing" - - "github.com/pierrec/lz4" -) - -type testcase struct { - file string - compressible bool - src []byte -} - -var rawFiles = []testcase{ - // {"testdata/207326ba-36f8-11e7-954a-aca46ba8ca73.png", true, nil}, - {"testdata/e.txt", true, nil}, - {"testdata/gettysburg.txt", true, nil}, - {"testdata/Mark.Twain-Tom.Sawyer.txt", true, nil}, - {"testdata/pg1661.txt", true, nil}, - {"testdata/pi.txt", true, nil}, - {"testdata/random.data", false, nil}, - {"testdata/repeat.txt", true, nil}, -} - -func TestCompressUncompressBlock(t *testing.T) { - type compressor func(s, d []byte) (int, error) - - run := func(tc testcase, compress compressor) int { - t.Helper() - src := tc.src - - // Compress the data. - zbuf := make([]byte, lz4.CompressBlockBound(len(src))) - n, err := compress(src, zbuf) - if err != nil { - t.Error(err) - return 0 - } - zbuf = zbuf[:n] - - // Make sure that it was actually compressed unless not compressible. - if !tc.compressible { - return 0 - } - - if n == 0 || n >= len(src) { - t.Errorf("data not compressed: %d/%d", n, len(src)) - return 0 - } - - // Uncompress the data. 
- buf := make([]byte, len(src)) - n, err = lz4.UncompressBlock(zbuf, buf) - if err != nil { - t.Fatal(err) - } - buf = buf[:n] - if !reflect.DeepEqual(src, buf) { - t.Error("uncompressed compressed data not matching initial input") - return 0 - } - - return len(zbuf) - } - - for _, tc := range rawFiles { - src, err := ioutil.ReadFile(tc.file) - if err != nil { - t.Fatal(err) - } - tc.src = src - - var n, nhc int - t.Run("", func(t *testing.T) { - tc := tc - t.Run(tc.file, func(t *testing.T) { - t.Parallel() - n = run(tc, func(src, dst []byte) (int, error) { - var ht [1 << 16]int - return lz4.CompressBlock(src, dst, ht[:]) - }) - }) - t.Run(fmt.Sprintf("%s HC", tc.file), func(t *testing.T) { - t.Parallel() - nhc = run(tc, func(src, dst []byte) (int, error) { - return lz4.CompressBlockHC(src, dst, -1) - }) - }) - }) - fmt.Printf("%-40s: %8d / %8d / %8d\n", tc.file, n, nhc, len(src)) - } -} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 00000000..43cc14fb --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 00000000..20fef397 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI 
+ MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. + // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. 
+ CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // whats a good cut off to call memmove? 
+ CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // lit_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT 
err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop + + JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 00000000..919888ed --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. 
+ if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
+ bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += copy(dst[di:di+mLen], expanded[:mLen]) + } +} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go new file mode 100644 index 00000000..1c45d181 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/errors.go @@ -0,0 +1,30 @@ +package lz4 + +import ( + "errors" + "fmt" + "os" + rdebug "runtime/debug" +) + +var ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") + // ErrInvalid is returned when reading an invalid LZ4 archive. + ErrInvalid = errors.New("lz4: bad magic number") + // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. + ErrBlockDependency = errors.New("lz4: block dependency not supported") + // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. 
+ ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + if debugFlag { + fmt.Fprintln(os.Stderr, r) + rdebug.PrintStack() + } + *e = ErrInvalidSourceShortBuffer + } +} diff --git a/vendor/github.com/pierrec/lz4/export_test.go b/vendor/github.com/pierrec/lz4/export_test.go deleted file mode 100644 index 3052506d..00000000 --- a/vendor/github.com/pierrec/lz4/export_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Expose some internals for testing purposes -package lz4 - -// expose the possible block max sizes -var BlockMaxSizeItems []int - -func init() { - for s := range bsMapValue { - BlockMaxSizeItems = append(BlockMaxSizeItems, s) - } -} - -var FrameSkipMagic = frameSkipMagic diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/01572067d493db8dc8161f05c339a5192b0b4087-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/01572067d493db8dc8161f05c339a5192b0b4087-22 deleted file mode 100644 index 4b8b629d5449a3467afe85535aa7eec4223bac59..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 105 zcmZQk@|DO-Y&o%a|NB@5Mh0F628Npd{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f e%z-KxKUFp4G@E%rU3w`^c_n8 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/02766f768fbfbd81b752cce427eb5242a44929cc-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/02766f768fbfbd81b752cce427eb5242a44929cc-5 deleted file mode 100644 index ead0ac0d319ff48ac6dc14c1db920ee00ceb917a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19 ZcmZQk@|DO-Y&o%a|NB@5Mg|~g002Oq1@Hg> diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/032f04032e12567057782672bb12670c20d38439-10 b/vendor/github.com/pierrec/lz4/fuzz/corpus/032f04032e12567057782672bb12670c20d38439-10 deleted file mode 100755 index f1a389337beb616a2f139bf9205844ad38db9044..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 
dcmZQk@|Ey)P+GZn|N8>%=qn6B;H2d%0RUK(2I>F+ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/0367b985641aca66e6e4eeea68acf5e2a02c62a8-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/0367b985641aca66e6e4eeea68acf5e2a02c62a8-16 deleted file mode 100644 index 753a67cd3ca9f4e93e6370b727a4729ed05f2bfe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38 scmZQk@|DO-Y&o%a|NB@5Mg}GZh6V`+ULaZX|9=AmgG3$^g957r0O39gRsaA1 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/03e85abc49352b2f7cc83efd7e4274da02d78b84-6 b/vendor/github.com/pierrec/lz4/fuzz/corpus/03e85abc49352b2f7cc83efd7e4274da02d78b84-6 deleted file mode 100644 index d2017a91adb9aca4ffd9ec8a84fd76ffc43f5083..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13 RcmZS4@|9o!0u~@W82||00lEMH diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/049f82a81bb6b4d7cf69fac5e413f6ce299d48cf-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/049f82a81bb6b4d7cf69fac5e413f6ce299d48cf-8 deleted file mode 100755 index a69e01f280889b4a8249ce9b8cb378b796c262e0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19 acmZQk@|Ey)U|?|4^5tPrXJlYF#{>Wye*)G3 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/04c05c7956f17e57a91a47909bd0706135cf17a6-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/04c05c7956f17e57a91a47909bd0706135cf17a6-1 deleted file mode 100755 index 0ea966b8f7cc3049888e796ad3ccdd9f375d71b6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46 zcmZ>Y$}l!`j8qGbjGmAd!oVOV-{8Qmq@d3d!eIaYj(`F~O1^kq;)OQ06z*mIMgS$D B4Kx4% diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/050e2af2a57d8044139ba21375f0ac6fcb7ab0b1-12 b/vendor/github.com/pierrec/lz4/fuzz/corpus/050e2af2a57d8044139ba21375f0ac6fcb7ab0b1-12 deleted file mode 100755 index 6cb971272a19d3fa9fddcb0617ef95cb36991fbc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 84 
XcmZ>Y%CHbGa|KV04m2CKmY&$ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/0547c73efb9b6a345fd9a52aa0798b48dd9aca62-2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/0547c73efb9b6a345fd9a52aa0798b48dd9aca62-2 deleted file mode 100755 index e2f4639b3b98709980a056683b5217dd0870c8f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33 pcmZ>Y$}lu^j8qGbEVa7Mz`*dT=HS0UrPW diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/0990ac54decbca1a97893e83c7feb2be89cb10ea-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/0990ac54decbca1a97893e83c7feb2be89cb10ea-14 deleted file mode 100644 index 37a0194e55dcdb47987d0f668a3ddc54ffaf1529..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 68 ocmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lW~Fqwx$Ld20pU~CW#0Ig98lmGw# diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/09f2eda28ecc97304659afded4d13a188baf2107-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/09f2eda28ecc97304659afded4d13a188baf2107-22 deleted file mode 100644 index c1b1ad50619559026aeac86cefc84eda3e3a405f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%Y%CHbGa08$7BS^xk5 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/0cf885cd35e7124005b0ba0c3c4431ddfaeff84d-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/0cf885cd35e7124005b0ba0c3c4431ddfaeff84d-11 deleted file mode 100644 index b45509f6e5afba39a7f6aa400b98857af321b0f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 ScmZS4@|9o!0u~}@kQxA6pacd0 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/0d7c02d4e91d82b0355baaca1237062639442db6-3 b/vendor/github.com/pierrec/lz4/fuzz/corpus/0d7c02d4e91d82b0355baaca1237062639442db6-3 deleted file mode 100644 index 66665729..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/0d7c02d4e91d82b0355baaca1237062639442db6-3 +++ /dev/null @@ -1 +0,0 @@ -"M@5 \ No newline at end of file diff 
--git a/vendor/github.com/pierrec/lz4/fuzz/corpus/0e1b2b0c49dfb86fe01d3453dd24e39482e132e8-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/0e1b2b0c49dfb86fe01d3453dd24e39482e132e8-7 deleted file mode 100644 index 8ca78705a4e4e7465e5326de594a8bccfe422481..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21 bcmZQk@|DO-Y&o%a|NB@5Mg}GZAaDc#OG^c_ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1.bz2 deleted file mode 100755 index c21a363248ee3e69300c4f872699e14d94b64609..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 ycmZ>Y$}lu^j8qGbWG}V6&cMLH$Y9@~z+lM3aO6Y$}ki!aB=VRT6ih(^4M27xNDQn(7AT+u5(ddY JltL+}HURCk8Dsze diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/10fa5d9f0fe75f73c0e92a1fe1c00f0041ec8f39-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/10fa5d9f0fe75f73c0e92a1fe1c00f0041ec8f39-24 deleted file mode 100644 index 62459e7280d635ee35f1980023b924ec33fd2c66..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%z8{OoL FL;wi63k(1N diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/11.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/11.bz2 deleted file mode 100755 index b8539d875265721333033cef148a8ddb5b49cee6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14 TcmZ>Y$}kcyaY$}ko$aY$}kZxaY$}kl#aB=VRT6c`y~85kOrK*B)O0Fi-GP*nhK C#S`%W diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/142d4f8cb427dd3562d72d889dfc0ea3a2b03d98-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/142d4f8cb427dd3562d72d889dfc0ea3a2b03d98-22 deleted file mode 100644 index d162c38e792e97a45e2f10a9bbc70df796f7e142..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 83 bcmZQk@|DO-Y&o%a|NB@5Mg|~gpcDfDw7d=Q diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/15.bz2 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/15.bz2 deleted file mode 100755 index bd2c29c27df1ef3674b1ac103ab04003f810e7c7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14 TcmZ>Y$}kfzaY$}lo?j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42DdC He^MI&cccu4 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/16.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/16.bz2 deleted file mode 100755 index 5e13f64415b9b631a4303809662903f269bf3e4d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14 TcmZ>Y$}kr%aY%CHbGaY$}lu^j8qGbRA4aVVK{Oq^<3Y%CHnKaY$}lu^j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DGh+=a diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/191e0dd24b8c7f8babeae4839768df39acc17eb1-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/191e0dd24b8c7f8babeae4839768df39acc17eb1-17 deleted file mode 100644 index 000242c63608a1181bc2c414ded8cc81b0d4bb8e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^&cG||4gkZR3)27q diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1971e6ed6c6f6069fc2a9ed3038101e89bbcc381-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1971e6ed6c6f6069fc2a9ed3038101e89bbcc381-26 deleted file mode 100644 index 073f103567acca4e3bb2adc17ec3ce85ece940d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 68 qcmZQk@|DO;Y&o%azuNm)21X_Z1_lW~FqsD;>lwi81|SnBZU6w$8w;EO diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1a582381781f264f551bd6f0f2284a931147e6d9-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1a582381781f264f551bd6f0f2284a931147e6d9-4 deleted file mode 100755 index c02b40509a98970d9d91af2f9b6dfbf090a79955..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9 QcmZ?L@|EzFU|?VX00=h$Hvj+t diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1a58f02dc83ac8315a85babdea6d757cbff2bb03-30 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/1a58f02dc83ac8315a85babdea6d757cbff2bb03-30 deleted file mode 100644 index d589b761ceaca3f9e4d5494faed7ce2f601e939f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 66 xcmZQk@|DO-Y&o%a|NB@5Mh0F6hK3qs0RfB=VRT6c`zV85kNQ3=J)f3@nW-EKN-f e%z-KxKUFp4K-j2Y8n8g?j1q^ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1c944d5065b1a2b30e412604a14aa52565a5765b-35 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1c944d5065b1a2b30e412604a14aa52565a5765b-35 deleted file mode 100644 index da130d44bce039ad904b05c916480b0c55a1af18..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 scmZQk@|CDdY&o%a|NB@5#_hZe3=K6Bd2j&+`)!$b8GwTSfB*jv0DaLC?*IS* diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1d37fb332301cf7de0bd51a8c1aa9be4935e89fc-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1d37fb332301cf7de0bd51a8c1aa9be4935e89fc-1 deleted file mode 100644 index ee12c7a5..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/1d37fb332301cf7de0bd51a8c1aa9be4935e89fc-1 +++ /dev/null @@ -1 +0,0 @@ -"M \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1d6b87b52e62cb84be834478ad88129f5e1f247b-9 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1d6b87b52e62cb84be834478ad88129f5e1f247b-9 deleted file mode 100644 index bd5ff8ff1fa0e21cada483100c931b2c0509ea8a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 icmZQk@|DO-Y&o%a|NB@5Mg}GZ1_p^d2niHrNCW_vPza6y diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1ec2f11a8d8b9cf188a58f673a0b4a8608a926ca-3 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1ec2f11a8d8b9cf188a58f673a0b4a8608a926ca-3 deleted file mode 100644 index 7eeb2e86..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/1ec2f11a8d8b9cf188a58f673a0b4a8608a926ca-3 +++ /dev/null @@ -1 +0,0 @@ -"M3 \ No newline at end of file diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/1fc2ba0bb981fec47badea1c80219452c9e3c76c-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1fc2ba0bb981fec47badea1c80219452c9e3c76c-22 deleted file mode 100644 index b53b47a542019475666d8c29698187d4d7d018c9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lWRULaWm#6Wf)6N7>&NT8txS)D9UKnWxamihm` M0i*y-feDE`0G!1a#sB~S diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/1fd8444ac43541c44a1c6ed8df2f688b1fa09681-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/1fd8444ac43541c44a1c6ed8df2f688b1fa09681-1 deleted file mode 100755 index f3fe56210bc1e92f2456695a25d79a5f01b7af13..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 29 kcmZ>Y$}lu^j8qGbWG}V6&cMLH$Y9@~z`(Y$}lo?j8qGbWG}V6&cMLH$Y9@~z+lM3aO6Y$}lo?j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DGms4| diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/202a9c8b188cae90f29bce3bf0438a035c504eb4-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/202a9c8b188cae90f29bce3bf0438a035c504eb4-20 deleted file mode 100644 index 0014dc68bb5ab4a287ce9ac3fee91de7158de0d2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 62 hcmZQk@|Ey#P+GZn|N8=^=qn6Bz(R~r1H%=dMgVf`6d(Wq diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2065ba3177c7dc5047742faa7158b3faeaac1f3c-32 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2065ba3177c7dc5047742faa7158b3faeaac1f3c-32 deleted file mode 100644 index 8f24c4c02577e35325b95a4f58525772a6b72780..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_q-%hIPyg5_wDv{~Z~GfdW9m6o!VH|Nk2p7{G)? 
Po-C3uR8JlQlAb&O1KSma diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/20cf0057443ecb322ff1169ecbe6cf20250f15af-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/20cf0057443ecb322ff1169ecbe6cf20250f15af-13 deleted file mode 100644 index f83424bf2b5731f4849848410e7298b21be08baa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 icmZQk@|DO-Y&o%a|NB@5Mh0F628RC-kjDgMO8@}S8w~yc diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/20d1a26afe563ad77e7a95fbee6ff59ebf3e61ab-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/20d1a26afe563ad77e7a95fbee6ff59ebf3e61ab-13 deleted file mode 100644 index ecd0320fc6c4619a9705f58e75c34e3817de2cc0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46 gcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_p^d93(>?0Qg!67ytkO diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/21.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/21.bz2 deleted file mode 100755 index 0701d3d648e96b370bc62da1aaa0ab9d125cb2ff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y$}l!`j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DGrbKh diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/21c8be1bb9eeea5b141500dee4987ab7fbd40d4a-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/21c8be1bb9eeea5b141500dee4987ab7fbd40d4a-23 deleted file mode 100644 index 222b09581f5b34d1375ac85c970706b75a71808b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20 bcmZQk@|DO-Y&o%a|NB@5Mg|84h6V-zN6rQ? 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/22.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/22.bz2 deleted file mode 100755 index 25578e2dbe9903b2544519f846e4e149f2b37ca1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y$}ll>j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DGwKa4 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2201e32d052c15874f0323a09c330f3666029a72-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2201e32d052c15874f0323a09c330f3666029a72-1 deleted file mode 100755 index c4353fe81683aeb72f1cc8ec101a86d5391353a4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1473 zcmV;y1wQ&hT4*^jL0KkKS(G)X0|3c~|A2H5p~Qdp|M>s^|LQ;g|L|hUKKsA`6YlAD zc`I*Yq>cap03SdA000000Pe>@o%ZE+&|9wQdZK`RbC%m4%MG;|_Z&S3uFrNr00Yvf zPytkx3ZMko01IFM1MbBD=%4@+NF%Ic3;?8kv>bPxFFi;`@6aWAeqKW_jHKKq5loTBl0MJoVC{aKaD`r6|I>_9? zM-U21B2p*-P%g%l=nSX;C@ZZ12HN{%DIf&^prud%0HT0U0)POJ0HHt>000F508@wo zs-*y+P(lKPiUD5i0011Q3ZqJiAt#e00)2o9%b#m03!C@cI|cX z^ewwOgP}kG00jU5PyhfF00000KIUddWzUy*zYj(XS zo#2k@&yew7MfZI4zGbj5%K6?ETfXnR=e?&s%=R5=*Q$L!xO%M*Oy+NSp7(1bO*@-! 
zeNVjg!`Ha__?%a};t#KV*Ig4ehLd#HZe5%WSj~3RyPd!h+Oykk-8A;c00%OJ&5)RG ztzZTyDyoWr1qzf8ce-*`#_b&`~O?by^sztE~1O3vuo5Rh`LsGW40~*GGGf zPkZB|RQR@KQ$s-R(QfYVxO>gByB(iRb4rwhNl&iel{AKY-g)hu*{4F9(z?@gEw^V} z*k>>R001Z(;r99-PrD}1S6tohk5Or+g!>WWuW9l?;k?@!?(**4fMbO2WQ3SjEY|`r1R~S-8op4hizH5 zZJVK|2rd%0M72}MN_ly03Lu262{qixapuq!IDEzDy6p7ni5vE zvAVms$OAwqHg4?L0noVH4FKD=8s*Sk7t5?i9^UP%z3b8Dbba^F2hWYOH{EvTcYP0f zCNbRm>+Apk0000!$C&xIS@S)4>^Ugzy;~{NwKG>SO?}u?-^ zYoHD4qPvZGO1tfYRDJWhUER|;dA-{3yPL<#rBr|b$7zpvoxMJNA5aw?>zhca6KxH7 zeeN~f01B<{6~4WFd+yyc)y>^ERz2)Q8e0majit0`DFGT1=yEP;Q&{Xck1STz7;T2u z0ZJMhd8O}XJH_ForBPGT#_v6DbiBKCLidMCgSG(i-E2rb0N1CX>GlKR@1~Ri3i+WN znwxeQcJ1!PQ?Wj}`p3q7?Fqc-TjRM6 zn5%=UyEmMpdvxS-dE2MDX7*z@x!-N?yRB{&w(fn`nmcoMZEyep9RL&n4u-0(==*Bv zZvlY_pIQgEv|#k_Ub-!sYV3F3-F){w(DUYj<|MOax&;6R<#2euGN*3La1^9T%+}iO z?G7oq_keq?2iqmq54P*O0YFdy2J_0R%t|Fb=eN7P+2?uNw>fR*#qV`Zb#{ty4$bDO zS@+O4zTdH@9gy?Rx_gJm2Ww{7>!eaL;wXwQ-&hMT*9zW5v9^Y43I zxLc>YyAMoK?X2w8RUHV{rRmw7cYOdf3(p&a*_F1J(>865RH?AlX}fjRBv5yydcX(UP7O~$LJ<%^fDqFm83fa6nWPN{i1w$Y bJxnyhAqYYtOp_=ur;|~jGynmh0006^ON7Ej diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/226780b32ba8f87ec614fdb376aa0884011c4ca9-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/226780b32ba8f87ec614fdb376aa0884011c4ca9-17 deleted file mode 100644 index 747cfa5225ae3e20fdc337a7414292ff69eff5d4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 mcmZQk@|DO-Y&o%a|NB@5Mh0F628R6qK!TBhiQ#{Q{C@zSmkDD4 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/22897c61698649d7570de91613afdc19b66e6965-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/22897c61698649d7570de91613afdc19b66e6965-20 deleted file mode 100644 index ea2cb769eb165150857c610aab5410ff18651319..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 71 zcmZQk@|DO-Y&o%a|NB@5Mh0O9h6V`+ULXlVEg%ZSkOdM-AYqV9&Hw)(SqKGYOXL9n DeYF#P diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/23.bz2 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/23.bz2 deleted file mode 100755 index bc8b6be6a45bc9948999c68a8ec9070ce440a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y$}lx_j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DG#3po diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/234cc427d9be32470f3c2e11a6bc16567f558e55-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/234cc427d9be32470f3c2e11a6bc16567f558e55-22 deleted file mode 100644 index c69e874b3b8a83312db1400176c83fb7889b297a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37 kcmZQk@|Ey)U|?|4^5tPrXB1*!V2C@NY}6vb%Y$}lr@j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DG(-(B diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2486a84bf0f161f45b050d9c19ea9e35f5def864-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2486a84bf0f161f45b050d9c19ea9e35f5def864-8 deleted file mode 100644 index 9ed0d0e084d5b31b353699faaa49f01fedd1ec0a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 ecmZQk@|DO-Y&o%a|NB@5Mg}GZ1_ltH!4Uvv*#{l~ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/25.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/25.bz2 deleted file mode 100755 index 869a668b58382ce1b5c9d869d038e9b47a3a2840..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y$}l%{j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DG;s|v diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/25252b16cd4afa8ef86122448688c7095684c86b-12 b/vendor/github.com/pierrec/lz4/fuzz/corpus/25252b16cd4afa8ef86122448688c7095684c86b-12 deleted file mode 100644 index b5a986b2526123e39a24241fc9c34fda8adb9e40..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 bcmZQk@|B28Y&o&_G6MqxBM>t%uz-aCP=5tz diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/26.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/26.bz2 deleted 
file mode 100755 index 2d7678a7acfdaf4e7ec6cf9de1e87cc3b26dcd70..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y%CInWj8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DG@cDI diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/263fb3d738b862ec4050e5a9fbabfbd99cb0d9a5-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/263fb3d738b862ec4050e5a9fbabfbd99cb0d9a5-16 deleted file mode 100644 index 72e921a6686e4a2eda634e52f94048f906a51078..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|EyTY&pTe$nfhwBLgo314I3P5J+cdWcdG|0RXLx408Yg diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/27.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/27.bz2 deleted file mode 100755 index 409a9a2d97d6f314b6e6c898648fa5a1a57ef4d0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y%CIzaj8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{{s!*Z42R!)vp%>NPq DG|LS$ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/276580343a14eec04143e89a778dae3e14df472c-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/276580343a14eec04143e89a778dae3e14df472c-17 deleted file mode 100644 index ce6896874300e2ea4a8df1a3bbbbe3415fa37ea3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52 qcmZQk@|Ey)U|?|4^5tPrXJlhwU&j9?kU#SB{{n1Qk>VvhmV1qnj{ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/27fb5dc4016dc640e55a60719a222c38c604fa6b-2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/27fb5dc4016dc640e55a60719a222c38c604fa6b-2 deleted file mode 100755 index c742bacacc44aee87823184e0084d75395aa9b02..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14 VcmZ>Y%CHbGaz1i=6R diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/28.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/28.bz2 deleted file mode 100755 index 112aea2e8c0e8b29c3121f2be1ec2a17b9e35f86..0000000000000000000000000000000000000000 GIT binary 
patch literal 0 HcmV?d00001 literal 157 zcmV;O0Al|_T4*sbL0KkKSzC~1Q~&@LAAmpyA|NjWQ~*Ek+z=oDl|xkVKxoL&9*`cO zB}{~RDWlZ%n4Y1K^$n*SV}F#i(Y8Y9`#e_0kn2jrQ&<)@Eixu}cN3W|eT2obV{S=$ zAzI3A5HnhFs6ue@OfL1nJQJm4Zx?=H_0Wa}WDBNGGL(qUj^!B))kRsU*Y%CHbGad{ivR!s diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2b39aa66ecfac58e61185c9664a968233931496a-9 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2b39aa66ecfac58e61185c9664a968233931496a-9 deleted file mode 100755 index 27cfb62c..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/2b39aa66ecfac58e61185c9664a968233931496a-9 +++ /dev/null @@ -1 +0,0 @@ -"MM@"©½¿ïp+[Ô \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2c2a5947341d76797a7e2299f39d01e3aebb2eb8-19 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2c2a5947341d76797a7e2299f39d01e3aebb2eb8-19 deleted file mode 100644 index e94d8da56642f42dec7b8e176ad022612050e10e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 70 ocmZQk@|Ey)U|?|4^5tPrXJlhwU&j9?jJ5kr+VY>{9F0ENyAp8x;= diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2cc2308b75a2e8f7eafcf69370767e5fce314892-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2cc2308b75a2e8f7eafcf69370767e5fce314892-13 deleted file mode 100644 index 202625196ed4cd0cd6601c52e0e0557ad943d6d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 mcmZQk@|DO-Y&o%a|NB@5Mh0F628RFt|2Hr&NaQgA*%AP=Y$}lo?j8qGb6zG4f!@$5^{oTi@A>%^Ir@q-C3PxY|a9gTBdgrm6ONdGEPig}I Dnb8ta diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2de93224b5f0db491ced1ec491a9f41d71820671-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2de93224b5f0db491ced1ec491a9f41d71820671-11 deleted file mode 100644 index 71c5a14eba7cd3ebbcb82906e86c2f2bd2274ede..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 ecmZQk@|Ey)P+GZn|N8>%=qn5i3=K|Nz7hai6bEVm diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/2e8487cf61feda70c0d74f12bfb5b692b684f82a-9 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2e8487cf61feda70c0d74f12bfb5b692b684f82a-9 deleted file mode 100644 index f1c5b7a4fefd4ada12ca7689806d3ebe6ed44dae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 dcmZQk@|B28Y&o&_G6MqxBLfqVl*j|}839z823Y_A diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2f0ee9cf4bb951a37efc6460d5709442bc3de54e-6 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2f0ee9cf4bb951a37efc6460d5709442bc3de54e-6 deleted file mode 100644 index 49c3db23b6d6173fb72a79e88637ff17a29379bb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 ccmZQk@|DO-Y&o%a|NB@5Mg|bbNX%0J09l;|#{d8T diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/2f1ba7fe1cd90a4023706a2ea9c7c9dca8128119-30 b/vendor/github.com/pierrec/lz4/fuzz/corpus/2f1ba7fe1cd90a4023706a2ea9c7c9dca8128119-30 deleted file mode 100644 index 3d62f949ed5e5b9647961e404b8e3c9ab7fb29e3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 167 hcmZQk@|Ey#P+GZn|N8=^=qn6Bz%r&j9?jJ5yK|S$Y$6A0L}>w3IG5A diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3.bz2 deleted file mode 100755 index 197b557835ae0dacf12146abcfdea1713aa313aa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 ycmZ>Y$}l!`j8qGbWG}V6&cMLH$Y9@~z+lM3aO60|QVF$Okgy69EUm B3$XwI diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/31.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/31.bz2 deleted file mode 100755 index 22bbfb22c38d277892dc0bd8d87aee40bcfe6533..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 157 zcmV;O0Al|_T4*#eL0KkKSzC~1Q~&@LAAmpyA|NjWQ~*Ek+z=oDl|xkVKxoL&9*`cO zB}{~RDWlZ%n4Y1K^$n*SV}F#i(Y8Y9`#e_0kn2jrQ&<)@Eixu}cN3W|eT2obV{S=$ zAzI3A5HnhFs6ue@OfL1nJQJm4Zx?=H_0Wa}WDBNGGL(qUj^!B))kRsU*lvUT0LmH%YybcN 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/35.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/35.bz2 deleted file mode 100755 index fe6da549daf1ccee4d483b8c88fdc1ba181869a2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 157 zcmV;O0Al|_T4*>iL0KkKSzC~1Q~&@LAAmpyA|NjWQ~*Ek+z=oDl|xkVKxoL&9*`cO zB}{~RDWlZ%n4Y1K^$n*SV}F#i(Y8Y9`#e_0kn2jrQ&<)@Eixu}cN3W|eT2obV{S=$ zAzI3A5HnhFs6ue@OfL1nJQJm4Zx?=H_0Wa}WDBNGGL(qUj^!B))kRsU*Y$}lu^j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkg$`6D9 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3771c6e8ea0f20350dae0180a9b14e36b8aef244-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3771c6e8ea0f20350dae0180a9b14e36b8aef244-22 deleted file mode 100644 index 0d255bfa1e0cf635c76827fc44bafdc32e6a7823..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 71 scmZQk@|DO-Y&o%a|NB@5Mh0O9h6V`+ULXlVEg%ZSkOdMgU|}4R06eD+pa1{> diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/37ee7fab504f2d2039753d73dd0290c884bd57bf-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/37ee7fab504f2d2039753d73dd0290c884bd57bf-8 deleted file mode 100644 index 901a42d396b5128898f62b06b22b2c70753ba00e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 VcmZS3@|9o!0v04%!dHTY0RS7;0yh8v diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/38.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/38.bz2 deleted file mode 100755 index 30ca1c20ad7c03598b3103c2dbf1976e55f3a3f1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 zcmZ>Y$}lo?j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkh0S|=$ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/39.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/39.bz2 deleted file mode 100755 index 16298f604e58d565c56b9217a7432c381fa4208b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 
zcmZ>Y$}l!`j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkhI1h#Z diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/396101a712463bb336a18f4096fc3eb5923600c1-10 b/vendor/github.com/pierrec/lz4/fuzz/corpus/396101a712463bb336a18f4096fc3eb5923600c1-10 deleted file mode 100755 index 7274cb5ed0009231db71fae8344f5c297e5eef84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 74 WcmZ>Y%CHbGak diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/397127b75cb59b253ed49206082b0428b6b23d02-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/397127b75cb59b253ed49206082b0428b6b23d02-17 deleted file mode 100644 index 4a94d99edaad732774b82443653252cb142b220c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 43 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=Oz20R7MjnE(I) diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/39ccf446395ef707cf92a04b5508deda399372c2-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/39ccf446395ef707cf92a04b5508deda399372c2-15 deleted file mode 100644 index 974c4236d8fd0df9d712bb0ee8d611ebe05ebaea..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^$ngI^0|3Y84P*cS diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3b6fd6da48bb34284390a75e22940e7234dbbd28-34 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3b6fd6da48bb34284390a75e22940e7234dbbd28-34 deleted file mode 100644 index de4e0a82c061df3cfe02c205800ccf515b685a27..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 fcmZQk@|CDdY&o%a|NB@5#_fy@3=B0Ad5J9mX&DF~ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3de3c5c394a3cf05620bb80871a1f10e9e36f25b-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3de3c5c394a3cf05620bb80871a1f10e9e36f25b-8 deleted file mode 100644 index 667f2f2a71501f2b69634fa6d3faf406cde73dda..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 
ecmZQk@|DO-Y&o%a|NB@5Mg}$ph6a!bgChWFvIi^x diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3dee65f1cf51dfe2e5be498150ce22d2ac5a07fd-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3dee65f1cf51dfe2e5be498150ce22d2ac5a07fd-1 deleted file mode 100755 index 8600d6d505cd27825bbe8a69b069f3b7fdc17e16..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22 dcmZ>Y$}ll>j8qGbObcOP5R-3kU{_Mm2LM3Y1pNR2 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3e34341fb51769fd9d948bdd20c011e335b145f4-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3e34341fb51769fd9d948bdd20c011e335b145f4-1 deleted file mode 100755 index 194057f9beefcd15205479ab1c65f3d361d07cf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 64 zcmZ>Y$}lo?j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x RvzUFH8ZM-K>YE*+008=i5OM$j diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/3ee211efb3d5d8058cd9a8c59e40c8d0f7a3df53-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/3ee211efb3d5d8058cd9a8c59e40c8d0f7a3df53-1 deleted file mode 100755 index bb86190c205ae17643f402d0d6983e2f9b6ff02d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZ>Y$}lu^j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lU!CAaqss*m1z HEaw6MO5F`n diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/4.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4.bz2 deleted file mode 100755 index 679a04b7741552f16f2107271c43c7d5cf2d24cc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 ycmZ>Y$}ll>j8qGbWG}V6&cMLH$Y9@~z+lM3aO6Y$}ll>j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkhZx4q6 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/405726718b3f54a0cfae1666f06d3cc1ee747104-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/405726718b3f54a0cfae1666f06d3cc1ee747104-14 deleted file mode 100644 index 
63f58da0e6967f29514d765df519a8db4f388194..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27 ccmZQk@|Ey#P+GZn|N8=^=qn5i3=J?00Ch$OivR!s diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/407188676d45d6f9dd5f3c84e7df0e763c7cca57-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/407188676d45d6f9dd5f3c84e7df0e763c7cca57-22 deleted file mode 100644 index 76680dd54f939e3ed45a4bb7932d17663e6dfbb5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZQk@|DO-Y&o%a|NB@5Mh0F628Npd{~H(>B=VRT6ih(^4L~+Rg)C4&2_y`XfhdJi GP;CJBSQ)MW diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/408ac1a4a83e082e848c208eed903930d81e81b6-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/408ac1a4a83e082e848c208eed903930d81e81b6-17 deleted file mode 100644 index 934e10f0702b38f38b1066f17363509c7ffbec7a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 67 zcmZQk@|Ey)U|?|4^5tPrXVhR|Xkf@MOD!@~F*GnTGBz+WF*7nXG&M6dF)%SQHZwLf UGqW(TG_Wu-FlGRP*KX>e0HSvd2><{9 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/41.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/41.bz2 deleted file mode 100755 index d63608f3990a460aacc4e5e5f072b726fb26b767..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 zcmZ>Y$}lx_j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkhrVoe! 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/4114fd99aaa4dc95365dc4bbcb3c9a8a03434a5a-29 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4114fd99aaa4dc95365dc4bbcb3c9a8a03434a5a-29 deleted file mode 100644 index 4c8ea2b5fe46b2be82e7e9e53f4cc22e407b20cb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 55 zcmZQk@|DO-Y&o%a|NB@5Mh0F628R6qK!TBhiQ&(G83th>`~Uw228MB=Q&->i;tcGcYtTfcOnHAYmq`oGeg4 O38Wf`ATlrtssI4@Y#o^Z diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/413e39442f005279560ddad02bbdd1a05c9f0eaf-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/413e39442f005279560ddad02bbdd1a05c9f0eaf-4 deleted file mode 100644 index 8e4e3529..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/413e39442f005279560ddad02bbdd1a05c9f0eaf-4 +++ /dev/null @@ -1 +0,0 @@ -"Mna„Ƚ¿ï½¿ï½ \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/41b7eaf8892043eccf381ccbc46ab024eb9c503c-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/41b7eaf8892043eccf381ccbc46ab024eb9c503c-4 deleted file mode 100644 index 6b428d19e1d8c2a3ed5259538555cd48520856ef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9 QcmZS4@|7?VU|?VX00nFS*Z=?k diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/42.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/42.bz2 deleted file mode 100755 index f0f25c7fd78aae73103e7cc63a701c3c11e818ee..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 zcmZ>Y$}lr@j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkh-4BTX diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/4208b7fe7ac3a530c159a1c8fd09dd3078b5650f-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4208b7fe7ac3a530c159a1c8fd09dd3078b5650f-15 deleted file mode 100644 index c8c2c6aa62426046320864a69c8868615dac6e1f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 ccmZQk@|Ey#P+GZn|N8=^=qn5i3=QZQ0JNS6ivR!s diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/421bd1daa317c5d67fa21879de29d062c342294b-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/421bd1daa317c5d67fa21879de29d062c342294b-5 deleted file mode 100644 index 6a19eb3d8ac56d593b679b31cfab290dbe9f960d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19 ZcmZQk@|DO-Y&o%a|NB@5Mg}0Y7XU}<2$%o> diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/42544ff3318fe86dd466e9a05068e752a1057fcc-32 b/vendor/github.com/pierrec/lz4/fuzz/corpus/42544ff3318fe86dd466e9a05068e752a1057fcc-32 deleted file mode 100644 index a71836ea2676ab4efde32a1593c920c104903222..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 123 zcmZQk@|CDdY&o%a|NB@5#_cH#3=H-EK|mspi9vypL70J|LBi0`(#XKl$imXp)W965 uf`NgTfdQnR!G2rj-G&;8Jg5l{U`YX>9wi_Ka=_S$AHfER0VRR@{{sN*Tp$nt diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/42b056f9dac9cc658c80092e490b3dbcd436e3f8-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/42b056f9dac9cc658c80092e490b3dbcd436e3f8-15 deleted file mode 100644 index 2b38240671da6d27fa7c9648dfe5085d580c4684..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 mcmZQk@|DO-Y&o%a|NB@5Mh0F628IeCW?+!WV`5NXl>h*diw3#? 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/43.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/43.bz2 deleted file mode 100755 index f68d3a73a41dcc6ca6420448a0efc858b52bc8df..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 zcmZ>Y$}l%{j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggki6c363 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/432c09281c46537c98864bc7d601780562b68410-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/432c09281c46537c98864bc7d601780562b68410-1 deleted file mode 100755 index f462929668c6773d243d7ecbe686edd62ef035e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22 dcmZ>Y$}lo?j8qGbYz<~$V7L>*z_5UU0RTmB1%vY%CInWj8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkiOAm_x diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/446dc91ff0ddc34c3b02f741e3f6f079a4dfcae8-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/446dc91ff0ddc34c3b02f741e3f6f079a4dfcae8-17 deleted file mode 100644 index 2ae4b5acf927f24991600909fda9df2bd6913ed0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 81 WcmZ?L@|9o!0w*-ul0-gCKLY?@_XrRG diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/45.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/45.bz2 deleted file mode 100755 index 855e812da10291f9a0b563932857d0324099879c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 zcmZ>Y%CIzaj8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSe^Ch?U462Xb Oc`WA=l6WJqDggkif)9)U diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/451831159c1afb87077066147630b4b6caeb54c3-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/451831159c1afb87077066147630b4b6caeb54c3-11 deleted file mode 100755 index bfd673d45aa96bdd9ee338aa4dab59c6b96156e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 ScmZ?L@|9o!0w-h|!~p;(&;vyP diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/46.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/46.bz2 deleted file mode 100755 index a11978d36014479974c9b2ec30e1dc1b82b8d5e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y$}lu^j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0Bg4w_5c6? diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/47.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/47.bz2 deleted file mode 100755 index 78995878879f418a2ff19c29f808255ee361aacf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y$}lo?j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0Bi;q_W%F@ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/48.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/48.bz2 deleted file mode 100755 index d7b0df5d367f6506b92c91de2859a23911995b40..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y$}l!`j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0Bltk_y7O^ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/49.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/49.bz2 deleted file mode 100755 index 0e16e69c13346cdfcbbd4306e763a01ab43af9f0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y$}ll>j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0Boce`2YX_ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/49861b3d9bca3e2857d806aaecaac09af4bff1dd-2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/49861b3d9bca3e2857d806aaecaac09af4bff1dd-2 deleted file mode 100755 index 0648054b9d0bb5320f83f8cfbfc79ee18ca0b232..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 rcmZ>Y$}lu^j8qGbRA4aVVK{Oq^<3Y%CHbGa}ga9Lxe< diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/4b0ab2fc1fdfc56066c5c1f2751b292f4ddc557e-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4b0ab2fc1fdfc56066c5c1f2751b292f4ddc557e-16 deleted file mode 100644 index 51ffa5a1efd59285a4f3cf5df05f47a3369140ec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 71 vcmZQk@|B28Y&o&_G6MqxBM>t%uz-am&I@Y6#UMhMs_O3B=(cCY$}lo?j8qGb6zG4f!@$5^{k_4dA>%^Ir@q-C3PxY|a9gTBdgrm6ONdGEPig}I DpMesE diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/4bd00d26b893ce064dad6e771f30541b541d43b9-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4bd00d26b893ce064dad6e771f30541b541d43b9-18 deleted file mode 100644 index 244f762135fc9bf5123f4dc774beda16f78d9593..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 114 zcmZQk@|B28lsGS_b(w*Iff0xqSinLpC-%ZcphCo`%1Z=l-2XlnXcQ9zLxU0nFOaPH QFA-UH-$u7RV}GIq0A!*YGynhq diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/4cde5adc216a29fff2ec39e23ccc6fca80cd4a15-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4cde5adc216a29fff2ec39e23ccc6fca80cd4a15-21 deleted file mode 100644 index 9a3f3a8f744392c70e32a58cc5caa85c4ab87872..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 83 ccmZQk@|Ey#P+GZn|N8=^=qn5i3=NcG0NBh9ivR!s diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/4d1b64babe1f045b8374f4d74949622591546eb5-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/4d1b64babe1f045b8374f4d74949622591546eb5-17 deleted file mode 100644 index 983d0a4f2365a137bdc720b79c121899402f2796..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56 zcmZQk@|Ey)U|?|4^5tPrXB1#yXkh3qOD!@~F*GnTF$Q8YsF;zdp{bdviGhicv6-V6mBLY%CHbGaY$}lx_j8qGbWG}V6&cMLH$Y9@~z+lM3aO6Y$}lx_j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0BrLY`Tzg` diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/50a87eb0c097a7ebf7f1bf3be2c6a7dbe6b6c5c3-23 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/50a87eb0c097a7ebf7f1bf3be2c6a7dbe6b6c5c3-23 deleted file mode 100644 index e6a919b262580e935678fd90159ab65434172abe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 105 zcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lWRULaWm#6Wf)6N7>&NT8txSsjLeEKr3#NF7+_ Q|NjOC2D2J41tui&0C9mGo&W#< diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/50e3ac1126c605158726db6f2cca3120f99b8e73-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/50e3ac1126c605158726db6f2cca3120f99b8e73-22 deleted file mode 100644 index 5ad9706b72da7752f765a964e2aac568f88d9411..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 105 zcmZQk@|DO-Y&o%a|NB@5Mh0F628Npd{~H>BG!ugYBZDvlLxY5&p{0?5rICfDsi}cE eP=*1duAv55qbyKB2_y`b0h$0(0H(l%L>>UBf*nNw diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/51.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/51.bz2 deleted file mode 100755 index b965c7a646f1fa109fa59d67bf21668b0f70ac4c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y$}lr@j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0Bu4S`v3p{ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/51075c34f23d161fb97edcf6f1b73ee6005009a0-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/51075c34f23d161fb97edcf6f1b73ee6005009a0-28 deleted file mode 100644 index 4b93cd1b82c356135e91b096f11ee90948b1d725..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 61 ucmZQk@|DO-Y&o%a|NB@5Mh0F6hK3qs0RfB=VRT6c`zV85kNQ3=J)f3_!@#)W965 zf&rwip$4WAsEY%CHbGazW^!#00x~HM*si- diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/52.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/52.bz2 deleted file mode 100755 index 8215bac9f089d253c69d2685a83a4d9a97f90431..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y$}l%{j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x 
kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0Bw;M`~Uy| diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/53.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/53.bz2 deleted file mode 100755 index 2ab0a6de2512a19021319339370d361a190bb63f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y%CInWj8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0BztG{Qv*} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/54.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/54.bz2 deleted file mode 100755 index 6004de354485e1710d01b2429c4db9a597e4d835..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZ>Y%CIzaj8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x kvzUFH8Zs`VeCnGWqG0rO54WZIqjw(5xrCSm|D-km0B$cA{r~^~ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5431cabbc58d8dc143ece079de40300c1ce6e101-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5431cabbc58d8dc143ece079de40300c1ce6e101-1 deleted file mode 100755 index 4061e90f86018fa5953c22b91b481bf3e4647abe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 144 zcmV;B0B`?7T4*#eL0KkKSzC~1Q~&@LAAmpyA|NjWQ~*Ek+z=oDl~nOSXvokWkRG5V zdMTsS^q8KZko66x9F(-twnFIpJXXe#>q^5@SQa)dGA4L;6PYf3gvGLBZb^C}TFPz^ yGg@(|LU8d+F7?1X6QyKt7k**&(1r$NPn3wxj^!B))kRsU*Y$}lu^j8qGbRA4YE=Y$}kZxaB=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKUFpMm1pS|9`N_Oh8={VDoFhCV&Y}g{f%%IGWcZ#XfDFpy!tOnr# diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/59b254c3565c9eed2bc93385b821da897afcbb15-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/59b254c3565c9eed2bc93385b821da897afcbb15-1 deleted file mode 100755 index a6cbaf736a7253a27ec10d0aa24f3ee5611619d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 zcmZ>Y$}lo?j8qGb6zG4f!@$5^{k?&qL4b*)(SgCCfkTKvfT2-9fI;Da0)rs{3}gzD diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/5a962e3d6a128983afe9ea78a28cce0f40a790c0-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5a962e3d6a128983afe9ea78a28cce0f40a790c0-14 deleted file mode 100644 index 014f6a6f30af0debdd97a780bfb590e6e6181838..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 jcmZQk@|B28Y&o&_G6MqxBM>t%uz-am&I@Y6#Xv#;$|48p diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5af52ef91b6f717ffdd805585e24806407e9621b-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5af52ef91b6f717ffdd805585e24806407e9621b-14 deleted file mode 100755 index 8695d399b324f101638ad360c39949a7f582aa2a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 114 XcmZ>Y%CHbGa%=qn5i3=JUcq?IM%1OR}02uT0{ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5bd895c23369df9505dd99ffcd035dc5e897264b-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5bd895c23369df9505dd99ffcd035dc5e897264b-1 deleted file mode 100755 index affcc488964e3a17c555604546dd3bfe94d8d2d9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 49 zcmZ>Y$}ll>j8qGbjGmAd!oVOV-{8Qmq@d3d!eIaYj(`G#bI3=Bj1w2+u6&udaKcyK FY$}l!`j8qGbjGmAd!oVQ*e*az|QU&63^Rt2ERA-*6u8}Km#wz;#n520K00b%* AQvd(} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5c4f347c3567baf700dfccf49a91192c83b89da2-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5c4f347c3567baf700dfccf49a91192c83b89da2-8 deleted file mode 100755 index 6282cf69..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/5c4f347c3567baf700dfccf49a91192c83b89da2-8 +++ /dev/null @@ -1 +0,0 @@ -"MM@"©½¿ïp+[ \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5dd8001f8a87c24f866074c36b6b80f42b298ff0-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5dd8001f8a87c24f866074c36b6b80f42b298ff0-1 deleted file mode 100755 index 39ef02cbfe8c969084e4200339e01042b5782d19..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 
literal 38 tcmZ>Y%CInWj8qGb>{?*|jDdlbAw`6Nfnf)O0)rCQLzUoGPL5T~{{Y4_3PS(@ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5ddf63d61aa38da1d409e37b301e0fe5a207a051-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5ddf63d61aa38da1d409e37b301e0fe5a207a051-27 deleted file mode 100644 index ea34cb440c9d3528e08033b6177f44ebd54201be..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 156 ecmZQk@|Ey#P+GZn|N8=^=qn6Bz%odLRssM#7BOuA diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5e19e298d051aac48b7683dc24577b46268b630c-35 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5e19e298d051aac48b7683dc24577b46268b630c-35 deleted file mode 100644 index a39d2617255c92e35dbea47856f6cba1068c041d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 gcmZQk@|CDdY&o%a|NB@5#_fy@3=B2*+*X|j0Bn{CQ2+n{ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5e54c67050ee8583c7453ff13d6eec15b2255288-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5e54c67050ee8583c7453ff13d6eec15b2255288-20 deleted file mode 100644 index 0a87f43f7dc7ded006b3b5e0945ade4bd4fb5845..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 83 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=NcG0NmCNnE(I) diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5f946423d1138924933334c6e5d3eb13e1020e9c-33 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5f946423d1138924933334c6e5d3eb13e1020e9c-33 deleted file mode 100644 index fe8e779fa12c16b7e9ef7a35d5f6b43cfc625596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33 hcmZQk@|CDdY&o%a|NB@5#_hZe3=K6Bd2j&+0RYr53`_t3 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/5fbebd9edd144c4b9869ed4ab40c7cc3c46a4a8f-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/5fbebd9edd144c4b9869ed4ab40c7cc3c46a4a8f-4 deleted file mode 100644 index bf8e930d..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/5fbebd9edd144c4b9869ed4ab40c7cc3c46a4a8f-4 +++ /dev/null @@ 
-1 +0,0 @@ -"M@c¯ \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6.bz2 deleted file mode 100755 index 0cfbc601e365f93232364b895863098d7a65bf63..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 ycmZ>Y$}lr@j8qGbWG}V6&cMLH$Y9@~z+lM3aO69u6S@ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6046b14dd1f6925bcfe470a8484353f525db6a9c-19 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6046b14dd1f6925bcfe470a8484353f525db6a9c-19 deleted file mode 100644 index 1cd81c49e0b0da9db531291694f168544beb4c34..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 57 fcmZQk@|Ey#P+GZn|N8=^=qn6Bz(P<6s6_$*62=nb diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/608a9993a51ec7bf252ac76b163def5f7002d2e4-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/608a9993a51ec7bf252ac76b163def5f7002d2e4-4 deleted file mode 100644 index 79174b20..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/608a9993a51ec7bf252ac76b163def5f7002d2e4-4 +++ /dev/null @@ -1 +0,0 @@ -"M@T \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/610d8dc3cf4012e4e2d070988b0720285a4c361e-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/610d8dc3cf4012e4e2d070988b0720285a4c361e-7 deleted file mode 100644 index 20c9dd137d1deb6d2a0cf22c2712cd86361c57e5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21 ccmZQk@|DO-Y&o%a|NB@5Mg}GZh6V;l08GmUGXMYp diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/61b196987682fb64ef9c4ff37532bf9b2ac201bc-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/61b196987682fb64ef9c4ff37532bf9b2ac201bc-14 deleted file mode 100644 index d9fd8979ef91e7678a72d9e5eed974dbd34e5346..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30 lcmZQk@|Ey)U|?|4^5tPrXJlhwU&Y}k?__V{qN8~{!f2I&9* diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/626f8b6efa3ea0f254789fe6cf52f6e52538f357-25 b/vendor/github.com/pierrec/lz4/fuzz/corpus/626f8b6efa3ea0f254789fe6cf52f6e52538f357-25 deleted file mode 100644 index 6b1a5b5844ac62b4a9fc4c350a76770f1c18031e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 219 zcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lWRULaWm#6Wf)6N7>&NT8txSsjLeEKr3#NF7+_ S|NjOC2BR7<_5c3>nG67ZGB@7< diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6277f2e0a6df2ac61660ee1965c690b87c26b556-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6277f2e0a6df2ac61660ee1965c690b87c26b556-7 deleted file mode 100644 index a3979ce0f5fcfeb5e57fe29c3ea562d2c34e65db..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21 ccmZQk@|DO-Y&o%a|NB@5Mg}GZ28Iug08J|fqW}N^ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/62c738f00c488f493989b2037d9cf1781f0bbd40-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/62c738f00c488f493989b2037d9cf1781f0bbd40-11 deleted file mode 100644 index f965e8c674b218d6516f5ff33a6f46637087e39d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38 gcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_p^dWD+a@0K1Y0jsO4v diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/631ffa88df9713a124b3ba6c704c0c75727af2ff-6 b/vendor/github.com/pierrec/lz4/fuzz/corpus/631ffa88df9713a124b3ba6c704c0c75727af2ff-6 deleted file mode 100755 index b8f1f4a4d8a6408ee66904e79899ff314ed057b2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16 RcmZ?L@|9o!0w)OV1OO4z0p$Py diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/633df0cd78621cd45067a58d23c6ed67bb1b60cb-31 b/vendor/github.com/pierrec/lz4/fuzz/corpus/633df0cd78621cd45067a58d23c6ed67bb1b60cb-31 deleted file mode 100644 index 57f89bcb4fb441502ce536c9ba1a0ce2aa97501f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 
zcmZQk@|DO-Y&o%a|NB@5Mh0F61_q-%hCBuai99BT|Bej8Kmnj&3PVH9|Njk)3_t=T WC(FRlpahZtBCrgc`u~3xNC5yU@ftw@ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/635d5de257a1910a7fd0db2e567edfa348e47270-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/635d5de257a1910a7fd0db2e567edfa348e47270-11 deleted file mode 100644 index 32aa66d51fa27ed4d950f8a7adac40d1aaa9e029..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38 icmZQk@|DO-Y&o%a|NB@5Mg}GZ1_p^dWD+RQkOu&~Vh8pB diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/64c500b5addcbf8c673188a1477e4159851ae04f-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/64c500b5addcbf8c673188a1477e4159851ae04f-1 deleted file mode 100755 index 6c6541ba79787771828977d31a017d6585d7ff42..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 124 zcmV-?0E7QRT4*;hL0KkKSzC~1Q~&@LAAmpyA|NjWQ~*Ek+z=oDl|xkVKxoL&9*`cO zB}{~RDWlZ%n4Y1K^$n*SV}F#i(Y8Y9`#e^V>q^5@SQa)dGA4L;6PYf3gvGLBZb^C} eTFPz^Gg@(|LU8d+F7?1X6QyKt7k**&(1r$k<1+sM diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/660387064a3cf4cb81046989929abe1b4fbfc815-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/660387064a3cf4cb81046989929abe1b4fbfc815-17 deleted file mode 100644 index 1bf5f59afb271f1bcd67fbf5bc329a415862e7f9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 ccmZQk@|Ey#P+GZn|N8=^=qn5i3=M=Z083p8ivR!s diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/66068a7e7bdfd1038a84aeb3dec6e3cb4d17ad57-2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/66068a7e7bdfd1038a84aeb3dec6e3cb4d17ad57-2 deleted file mode 100755 index 1a2cd7c6d79c283c95fbabe626d0a03b09827cc1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16 VcmZ>Y%CHbGau0hr)3lvZS34>++|8HPmFsk|gAE@;I|M$E=lbL`_35FT~lxrYb diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/69dcc80940a26844b0afe7898fea9cf68b698214-4 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/69dcc80940a26844b0afe7898fea9cf68b698214-4 deleted file mode 100755 index 6864d20a003159f36636ebc2df5af296316091cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12 RcmZ?L@|9o!0;h%s1^^6H0u%rM diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/69fcd886042d5c3ebe89afd561782ac25619e35b-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/69fcd886042d5c3ebe89afd561782ac25619e35b-27 deleted file mode 100644 index 10a5a710215cf427832f74474db6f8ac94ddc840..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_tvyhCBuai99BT|BMX63=9nn{~Lg0&3_ODCM5D? PfdWb()j$N5K~(?%ZKE4` diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6a04b54e1511633ec895326b4e043e186fa5693b-29 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6a04b54e1511633ec895326b4e043e186fa5693b-29 deleted file mode 100644 index a8d5553527432ca1ef5bae6262824e1b9b259e44..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 171 ccmZQk@|Ey#P+GZn|N8=^=qn6Bz%odM08FGdTL1t6 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6a3e8935204dcd3dc48a1ff7415c305f0e5863aa-9 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6a3e8935204dcd3dc48a1ff7415c305f0e5863aa-9 deleted file mode 100644 index d1c7cf9a3dd8034811dbe47a18eac93115731a4d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 28 QcmZS4@|9o!0v0SZ03fUaO#lD@ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6b351674a45f2d9be602fe8d3fb84229551b4ce3-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6b351674a45f2d9be602fe8d3fb84229551b4ce3-16 deleted file mode 100644 index e736a98f5605aa405b676b4a3e6d71e8f41f85da..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 69 mcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lW~Fqwx$Ld20pP}l&qRSBv9 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6b72fdd9989971ecc3b50c34ee420f56a03e1026-27 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/6b72fdd9989971ecc3b50c34ee420f56a03e1026-27 deleted file mode 100644 index 3eec515907e2e814604245d93300d58a6c7f3c1c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 112 zcmZQk@|DO-Y&o%a|NB@5Mh0F628RD&Ad$z!puor=%)rnfVQ6S+WMFAzVQFe=U=CEl m08-abgRD^&D4+xq2Fv{a-@w3NR0F2|{|B4Q1k@$LPy+yh3n1D6 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6b7f4ac7aa8b357dee3067d7a60143c03b54bb8d-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6b7f4ac7aa8b357dee3067d7a60143c03b54bb8d-16 deleted file mode 100644 index f1a956bcdb8a6c111a7fde3f2202c3d724e27d1a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 ocmZQk@|Ey)U|?|4^5tPrXJlhwU&Y=~gBNH7D%Qp6qu0F0IgmH+?% diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/6bc138796e9b80572a6cb1b4a7ba30c97c22359d-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/6bc138796e9b80572a6cb1b4a7ba30c97c22359d-1 deleted file mode 100755 index e1fdc112331bc4a7efc663e09681430342ec387d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46636 zcmV)GK)%01T4*sbL0KkKS(G)X0|3c~|A2H5p~Qdp|M>s^|LQ;g|L|hUKKsA`6YlAD zc`I*Yq>cap03SdA000000Pe>@o%ZE+&|9wQdZK`RbC%m4%MG;|_Z&S3uFrNr00Yvf zPytkx3ZMko01IFM1MbBD=%4@+NF%Ic3;?8kv>bPxFFi;`@6aWAeqKW_jHKKq5loTBl0MJoVC{aKaD`r6|I>_9? zM-U21B2p*-P%g%l=nSX;C@ZZ12HN{%DIf&^prud%0HT0U0)POJ0HHt>000F508@wo zs-*y+P(lKPiUD5i0011Q3ZqJiAt#e00)2o9%b#m03!C@cI|cX z^ewwOgP}kG00jU5PyhfF00000KIUddWzUy*zYj(XS zo#2k@&yew7MfZI4zGbj5%K6?ETfXnR=e?&s%=R5=*Q$L!xO%M*Oy+NSp7(1bO*@-! 
zeNVjg!`Ha__?%a};t#KV*Ig4ehLd#HZe5%WSj~3RyPd!h+Oykk-8A;c00%OJ&5)RG ztzZTyDyoWr1qzf8ce-*`#_b&`~O?by^sztE~1O3vuo5Rh`LsGW40~*GGGf zPkZB|RQR@KQ$s-R(QfYVxO>gByB(iRb4rwhNl&iel{AKY-g)hu*{4F9(z?@gEw^V} z*k>>R001Z(;r99-PrD}1S6tohk5Or+g!>WWuW9l?;k?@!?(**4fMbO2WQ3SjEY|`r1R~S-8op4hizH5 zZJVK|2rd%0M72}MN_ly03Lu262{qixapuq!IDEzDy6p7ni5vE zvAVms$OAwqHg4?L0noVH4FKD=8s*Sk7t5?i9^UP%z3b8Dbba^F2hWYOH{EvTcYP0f zCNbRm>+Apk0000!$C&xIS@S)4>^Ugzy;~{NwKG>SO?}u?-^ zYoHD4qPvZGO1tfYRDJWhUER|;dA-{3yPL<#rBr|b$7zpvoxMJNA5aw?>zhca6KxH7 zeeN~f01B<{6~4WFd+yyc)y>^ERz2)Q8e0majit0`DFGT1=yEP;Q&{Xck1STz7;T2u z0ZJMhd8O}XJH_ForBPGT#_v6DbiBKCLidMCgSG(i-E2rb0N1CX>GlKR@1~Ri3i+WN znwxeQcJ1!PQ?Wj}`p3q7?Fqc-TjRM6 zn5%=UyEmMpdvxS-dE2MDX7*z@x!-N?yRB{&w(fn`nmcoMZEyep9RL&n4u-0(==*Bv zZvlY_pIQgEv|#k_Ub-!sYV3F3-F){w(DUYj<|MOax&;6R<#2euGN*3La1^9T%+}iO z?G7oq_keq?2iqmq54P*O0YFdy2J_0R%t|Fb=eN7P+2?uNw>fR*#qV`Zb#{ty4$bDO zS@+O4zTdH@9gy?Rx_gJm2Ww{7>!eaL;wXwQ-&hMT*9zW5v9^Y43I zxLc>YyAMoK?X2w8RUHV{rRmw7cYOdf3(p&a*_F1J(>865RH?AlX}fjRBv5yydcX(UP7O~WKtd4^K!6a_AsGbIYMG=B28i~j zr9DhE!XXGkAxx7fFsGAIpfmshpa1{@Or)YoH8jK$pQzf1^*u(Or?pHV%|J8&003#A zlSqjP1P~^Gig~FX)FG#o9-sgK001FKh#(1&nv7G_#Gaa|_>4k%O{xHB0NR0|2{e?@ zff^7H6G1Tq$iyb26V%C}pc-kDLBHb&N`1#6N(x9-bgUsyHpNx17~aGF{`qHrI~gyj zvdwoZg3&ol6NwD74zjpMq34916hk2irb(PgESOfcF~~?RU6+Jfu&ro_tf1O(h&3XD zOK3n)QEG$%MKuyNA}Kf+Y~^tXx}p;(jvB<1E0mF{MYQEv)d-qRI;99k9!2g#dD?m- zhi-AXCUWCU;Y?O*WTJ#3Sragh7jjAcZquHMh9OM}p*Pf=Zc8Ouq;WWz$gNvhzISez zAxX07SyNR}Cp(w1@#M-VB6EUtDnQ}1jOcgHj#O&QGO9??wM`PZTrA;CO;}SEPFzVK zvJ<5!L0yWlt1FZsh*V+3%!UmPHCrxSoz5Y^^e)OO#w+dTUN!PVjj1R7ny88|`%;L< zJD>Z9Cc-R=iQbh2QD7?{PkW@$MMOq1O|(Htl%lLx_p{RIf~E?)P6swCG8C0D0Xb7T z{QqUyKbQHdPaeSPA`+6|`R_w0$0O61mojRIC?F!D2`Gq1 zE>|F`A_{e3ooDp~q9_XySchgZL~2<^ z>f!aj4?xaTaus-iUCN;OLLu`igSb==-6DWc0{8dx_3!ELc=X?&re|KK9SA$D<3!It z_UE12-{LQmc}tkTcKP*uU@v^C9P;DOSD$W%^+#|X{1^@qK=A;1)S6~Lt(UcuR!!2a zx_0SjUDkeH;6(u>5G;lCghT=#|C@aIKz&SGBMQ(6AhPT+!##ezaOQw}zhwSLJMY=N zIKf6Dhiz3DkzTUbEk-Vy*+ya?bn?y;&ks!JiVfDo>+{!~oM@g0n>#q4ciSZui1~k= 
zdzHG%AzK>}4}Z?5b69k`j=K1_DGJ?<*%7FHI_ntjQa-J}>X)(3(D~atswfZPL*WcP z=t~vh*t1Bf>&CJup`?3GLiNKEfuT~G28tpO75{2UYaY`vASic8a!wGn9|&tHuA$P={xuE`ay!)%U(TBBFIC*OkMIygrwKea zXIMJMn-b(<%x`hJsA$EaRE4M@2s67ny9k*KJ3}W&J3}Z@#HFZhw(%Viy%MJA3~BG~ zvxcwZNW3kXHntA*rH*qr*|+L_n?JG;Br%_i)*ab6&ZFl-EIO~Kh{QuV)ux{<=?3}b z_xGNeU)}QW#6do@z*6V-Kw-M`5Zuk95YX(*hKv?71M2s=x5QywEpr+qH_T&lGAa&Q z!4bkgJlqvzE}?JUQ-_ir9WxM6NM;}vMDI+xP48ODyBb&K-3@0YfsZY#iIU%+z3fGcRGb@x!-P>GA7}n*eq2$Ff7mh$nbejj270S!{FXeMb@FlD1YT zR0lDjO-QWX=3xj*2o6s4TG~{0?J_RE1}W_eEX7r%Y$p{f$_l3IPM=76A3F6f3YD>l z_bbrF3#>!g%)w$j^n`gQG>E6pu)!EGGHfRU#nQAG2nFze2}ZyZ0E3!{VUfw$W?#&x0{{Ujw2&Kuzlz zzcb40hUr|`oF9iB>6hb-=G>l{$TBl%BSRa$*I6DgvtN1UlFc#t+Oxjf&tFM_6mUU7 zW9fS82fgxKC``yfOCD>{rf+XNz43~P>N!fVb-Ha!vTD|t-gq(&#J(^j4$b`uiKg?h zPl@iJ_Um4oqk#-uh3H3bJTnf+LvTXUf%)kR#nV|>mZx^_2hG&tsv)4_6M9TWT}529 zEwb_?{N^{tQYX{663pDl>9$2s!(-n5W5Iuh_&sY?J-!(7?RH>%oEs@v6Lf{+$EP{C zf5HpUDs{r!(1BbN%wnezGh^vB$;=~J&S*WtIX=Fg=Dv4$EeJ9lX$J>0I={n1tm74u z@SG+pb@1;9MEp;62Oi%KpMHaH>EWJDzVqp$>)&DR+$|*hz z@DH0^@Iy5pm~jRat{I*ipC$|&^Q5ozk6y~U`_6NPVLQnN+=mYm7(-0EyHX#GKQCLs zzGqePtzPcWm%;{F0FA*4ZMSb|f%MRJzgF#~5X+STo5oCv?U%aI6O1;2ik%4Q{8RHS zw=*m8z3svwPFpfr{ExGYEjNkYfjK&YqTg@r@d6k@77RS64~@ciC*JJmKA6Wf8pIn> zzn8q0sV#jJ=&S?}6Q-NA9`dE)>I)4lY1jhusso4;9!e2=QrPYj&%zN##Gj_csY{&^lP zc10czAd|4Jx?$Ax&&xn9(Z2Dl;7HlGGsgOUarqS^ckF}3&qgbklgSXHxS5riRp_Y) z&`UDAAu=lUxUR%?wRjR3j*qC0^B)}(bFj00pCz+Nr6Bh*%s?gS-H&(g7mmAWAWY}I z#{-v^cc~j=3HS8+< z@5p>7{P}yv(>E~7rsc0Zzh;&wBdV95%}1|8w%OkDZKvsheC1XaF5a*(_LGkm4ChOy z^q%{Hf-!|grnl|$sm%InqrS!Q#$-<0E#~|y9ph9Wx8)4oU+*lq*?puzp{++J=&GLD z==l-%uM3BgW^Ik{Q>5^*TAs z^SQvy1-4Ojtl(p&y!K-b%yH@dc0Ko29?i-mlaqSeGiqi8%$f1I0DG_cG-QYzrt%pCm|%f=}2#(XTVjM2Q)+b+R%Sw29X2Y!qVBDni# z-w;$mz0_`^b6C7{-3U6A*g_37Uv)Jo&N^C%W^G*~9wgKWvqE-cHf$gkBt11OGo-tr zzUjAeZ(nqy5p|qlV^IR`!8fS&+(xivw6ntQF{gq( zH+7@tYf!_ZYVVyr!fot9;tM-#mi7BE1l;q}v*A-KdDl8{;(XhtUxU$dC$f+s$jkcE z;F*B#3r=Eh<9%s&pF>sc4iVB=HwRC|NS?d5Cw)kn7$h6}X1!Bmsqb`#Rx+lwhhtu{ 
z(VB^EtAnef+9n`-nBNr!+oo$*;{-eYd|AdCj>NBIU-7m)J()J`F8ei}MV}1JZqnbB zh2)g{v+Hk+(geYNpB3jt&L)lc-l)BCVZ$7un=f~zy8XhX&n#bi@ttOBqs z0(y4&YPIH9htEQ~sAI~fx5^Y3&^nnl$MWrA}DoarTi4LFDJizD- z=PdL$adQ%&Ek{pwN>z#5CK^F+_P(_Uppe%P)7|7ipWY%we)(%I9=>YFN|wSOw+NnJ zpR0IJ9)p2}5j@*DaTn$qu6M_1S&c=pq#A4992*#pw{|W<+2rb8X|u?0jW6@$i;xjE za$n}Ho~#HYSo=-i&Y$im8Fz~`Jq7STs}YEwl)_y)Y5@EXsaA?f(~R6f1G8gs~-s{0Fb!?fd@6@;!cD`~3VvAwJ6dcRoKW zcGNmoOhfNH`WIiu^ZL8*VbQVtHIWpZ;oR~y9o;;T;Iq^YMejgP^SAY+^$5v5QXC-< z-km&Ot~=zvr9EA3`@M$M+@&Oua^y2_-G){Ejr{OGPuu)sU%%f&>$g|LPv6(=v)eZ~ zFQ33wI+GvxVtBNT5VzM z4H4;EiwW3PUtaYpAd6A8t=Sf!N)U;m!c*n~lTtJpBaOt}arE^*=g;)J{Q8#%oXqsW zo7_H9nY5^%y0?${Ag%ToQa<)VB>U^QH%Be~-{GZvDZsG=WRE)?KN;QO0s3c*{C}6n@;y)AANl_Ow`uIoA5V`z%|AVHpG=Z} zdk)9HKY!=b>-PFOdp`X+_DFs|)uo0_ous*VBopRSISozg$L+Ab;rH#)@NdKWyng?` zyYDN@3O|+4Zx1_vORfX@-TLi1mOuyJvHkXMH=}qM*Qf7OjmNX-Ug&=S8K*H^blap>tC>+9IQ#A)mP->^le6;_*4ztX?q5 z{gc-=179;UrucP5o>H*<7=6v#RpR#Tv4Sj6C-UqY!`h`6my;d74^+RHS-=tLF_%bi zPw4pe^xS-XI``H2WbMPQqSkQx<&jZ2R1r`t1SaSe0Qo!|bE7@=&cvXf!#lM9*hGjP z@YpU-SFhC=U7_f>yo?v~{PmgK%`E7mcJ21EgB&CL8qg+3(VATKtIKT<^m2E5h%YlE z3%z>}j(2|cKRi@Vh7gmqx(9@kXo?w)-L>!Q7gnC1;U8^MD8UPnkQgYSQCJ~@4A#Iy zZPCq8OuXwC{FGqi(a%ht%5%YK$dWZC(>t5J(Xi( zLA8UgJvHyCz4Z@MsUO$0D|^N=q>#dcQmRt}q3H>LkNHL@=0GmWQiR8z=W=}mr9tn2 z%1^C%m-KO0W~oUaP#i67uQ1`RYuj7r?>`^LzZN*6X@vLo{&(P<=7LCE==!}oUjL2g(iP8S10n9==3+w^ryJ~ zU13;eQxgF{(%89v9Nc7w^EP{XdFcHgug~N6_pQC?2%=My1B79Om>r6tM|xWKG}k{+F90H~M{A)b_XXku}Z_VAC7) zs#O9r3L{G>1CrlI{;#+G{pVe;`(N_?e&NS&hui$0?&<7l{JQ7ot-q}R1c>_d9geDWRHgu&g>Xb>SmU2mx0M{r`?%^A)-3L6<4Ml%$ z*B%eoFFT!Be#iVj&;B3o{@?7re|cX+JYG{j*Zv=0r`>LbNDmLDFVY>gEg$cXKHoj{ z_cGys>)$)YceBq7WP+H$S--cp#s5FA+U@-75}=h(HjmN2%< z=KL~jz*GH+r|{}+n!(;URfp`AFj5x_34TY}vvkKRKJYje5P!>hO zkRKxt1kUf)&VC{J|J46a<^J3MRbs>M@9)$=kLDSJNBV!i?@m8(0~*crKcV@~3?JA1 zf9|ud=bzj4$o@I^BgX%b`u~UgKc+GJV|>+)>HfR&@84tW+nL6|8;KQFRQ2l?Dc9e} z+IYRo?b7RqnY=d&ZfFpov>b7__3!85)0gUb7yhhg_gD}*mHkE={eR~io7?>vFhN|y z1-+&Rne%k#9xl7N{W;gt`op+GP!#$>^LSg3{8h)<*Z*LW{WaVHjg&(B?;gJe#P%=I 
z&8!)~X#E^Fr2${B=V@OM)H?S-yHXQa$Lr7Qk^Q67{(raoI{s7Z{(sf-|J_4=b7nut z$LU}0A79=-{ysbG`tSMy_iy3h#y^!cFjl-hjeX)0&$lIkSJ3m@sP;$h2{jIC=*0- zL6Fb|1F3M1Vf2*Ln}q8Ns5KwAOa2;kF9Krg=r~Z<0A*g+y>imF!ubppL7D8(0 z+YgWIaf;Fw}u4V zR6XNmhOX2J-ON5ngO|Gsx)^lOINq_oMme8c&IyYkNZHZcdSz5_C*w#EevYJmj>rYg ztexd4?P+}*CCy@kYd(~54+u>xC>)*T*OeQ1(1FzEFxzH#Os?o}54R5}4B=h-U^+I* zfrXMk!x&++lPA_EC=vyv*mIR05tbgLrSWGmLFh*4CmpfF$mmNFSz~EB;mvCErV}ku zcc!$!AP|kLPubZQx)>tV$XICj>ldWJ1J?dfh#--Z-OvIB8waH!lhr&jZs>II$P^g~ zojV@1*G@M0{<3;Dn+fyr{n^^*%iv<$Kfr)7nDjILrr+*=K(C+K{mVm#fA@bk+>hh? ze`omg`PWYV9-q1&n!z}gyhEKxDusT#6bL+SNmVPO<4^QXk z@BI2-d~&z&y1#+?e>;X$=lJL8uOE2R)St?Z?@+qq;Nw9NDHDcgWT=*P2yxJfhIt+^ zdWlR|bgrpQRBB>5Lq+aPVj`(-zsFJtqks6_6G-O(J>WfE-`h^>-Wi2wEt}N;ku0@a zY{JgVmK&jfqe}A6Ez_p2ien59Pi#gtYRt(0JTzix=Ps7=AZw^V>oT#XGkK~RLEeKw zXI?eNyjrX@%dQ+=mQ=LVUlqKo(=HL^DfdmxoJiLZM;E(Dbr*}-_SnJY%7R1+y15w3 zzVjHNOxo4P0k2PXSf$PAYYUC6XHK_b^kYwEEmGz1@zv(?p}7n|zVu#=$f(l^Ra%jD zU|(k}jxWt>COcbyYTEy5fw>p#^TXcfZZTL$M(?Soof?H=q%@SdR}TW@qLF%p`D-W#W0M=MAkt+D7 zs;yPA7scBRgLS`R8q#Hl9P63I$UHSlYCU2pSnl0aTh%LE@Lf+bH!~Uqwr=heZO>g( z@NdS(PXI>7IuCMTZdmW7&1PQs&Y*PB)6-Rf^6!oc%A!)vfI~R{Me7f(S$tg!;D1LOdo~HhX!V_A!y$ zV?vODg)tg&d6{t`b`IxY6O5z87O|dC)l^nFo$I9=b|Uwz`pbtL8hLxpdo=7Q`Vpf8 zK8SHmz{U+{)7JAx;@6jlBPn%|t7Kn(UMUW0@842(s5LgngeeAw zBv<1c;?2FNHM%atw4AhFrK*awf+UCxj10XK<>d87TI`-E%*G|AF0lO*n4tKaN0W@+ zPnb!RuWa=!A$I*kEEdFPbo8$#`)zd?x|!E!S&o`6u4LD<7!qfh?$H%7ggEVw7%{@m zS@Uk;o%CbHnHSq3>*;yk^!7Hwm6wCJ^_zLLPUi0&+-gH}8n+uh>+xb$)1tC)@EZ@m7uyN?$h`{rk^+rGs;>khU@ zSzVbWw>R+6XtiJ>ZLU^eD$^z+Sq6bm>u~w8;b3hl?Du!vO;&Fg8{1tUIF7qF7%;e> zh3xmU|*RS2BL^XT`8YqMvyop-06z3Tb)%Eo~x`XDY7jGn4Y z;yv}Ih3glCdB9o3S>x9ksT&!`-x<%n?e1Bl#D&~JGMDVDQxT%OR}D+_yNd`V4J?*= z2MJ~5`E>y}(k0Oj+`g${IkQ-$O`dNXG|1nC%)Z4;VRmLNOo?v0%q+h~kB=OKV{Afhdz z2fj>~5=4q#AkyvAPDXjO7VwTAvgDe;J>Vq`X@pk`oLh5m&b&V_l(BEvt(?er)3Dd4 zkiuL^Xnq6|-q_;dn&SM6rNx|3#QS~Q(zH)kn1Y!CL2?P_TF}m9P|IPO-CYyw0XXg_ zo1Nc^Fvcmg7;b5V!|~1HlRJrIb(ed?wK&9~)}Iw8JnI8-zOFeiGB@fm4UkGChp!b^ 
za=jeO?$J*>I(oXe{BB%1jF!^(_n^erjMa^jgLd0@ke<@rZrv^{iX?nb?_=M+eF%u! z8}PFVgz}%6W%m+&s3+O>AR^_xZqL89wulPv5r0tSi3t?4+o1uGf(`lh;lYH1N;#+R z4_4i2t%&r~kTYN|G0%)XkJc}b5i!Q|lO1^%;Z&4$)BSR(M>MWGe!JdgZ(W`XvbRxC zzPeTSIKWUiJMS@02<&eT;WScH0tN%V;rCHEkr)k2gja?f>vVf*X z9@|0he+)jMk7!UNj2z|fs2nzODDYe-tn9oO@k?1E5u5nfBkM7LFwLc+*BmnhW*lIQ z-3lV-TFbq(My|A-t!~fX+WYv1`s0Rin{w7ByD7q|v(>QA)^)ahE8+j?e08GPB#$0M zi-AOs7RVYr#_G$;7~?yz^=E|etQ0SPJMBA(iL&~jz-#$pENIDC@{$VZ~)0kskMWas7=NM|my z*8|S?yNE7}Q|3P%PWoSS)!jBNOJ+P)uCCQLg6$}tkO-7e2-qSF7a}zmi^Q;G)M^WN z_0!znPa^YHd>lSo;~4Jo7JN(d(F_JKQqlIDE$~86q~hSZZu-!y6l{ zV+^kuHK{_`-fL#%s2@&+>kaBJH>qBTg2Z|MrFSO~)z)FLfAFR4C$-sFcIw`+6G?2Q zptfiG$lu?*wj#sP4oAyKcFNlYJy7h!gfS+Qck*lM@z!n6O;E6R@_u8z@Am74hb?d# z>uV1FK>o~q@bN1$(K1iARfMghYeYZX(_OD?(dS4Fn@#YW+by7>D_aNA(H_#0E@(3( z*zX#9X8kZem9l5HStM!&YGukWz{ivQyb}uuhWC`%j@iJY66+iFGf5LG$8mTbqh6}L z8;k4KuY@L9V(=)(ws(`!*NZG|?S@OeUGavpWCb90a>o0K+<`a9~z_(T6M{tJDt zsrzugMZIQHV2JSA-$Z?%qU3&Q3zyoht3Q_JS@wMxCFr%jYhBe zSQt!g84-Hz>aWJ_=zi<+K+)}v8rLf<<@QQ+py1a^x^}F7WxzTGbNl@24@eTe*flK{ zxpvdM8KrEcYZKl+so)nG`}u!Yy={7IiAvvtu4R3=3l!a~wxr)|{B#Bz@XWASUjEoI zvyga(s7nj_N%q<|)*H?AnNBRt+-5YJ@5_yC+TVgLW<8Gz1L)e`#^=JG!6U}?(vGXX z!>mhUvHh8*s|tlgF(;C>ki}+k@`5Ab69cl>!Wc47Ny!FrH62ql>iiHEO}J+&XX%?C zP2Tp$KhYt%4xgyc^^HSs2m}pqgN@_JZ00!L+g&+%qy` zYn&aP8#JNAk_o1}>C%b@h;&f5LkU3M8 z1a6rZaYRyvfI;Xjr}XhzyWENnm8lF$v~vstROGXv46x+Mun!*F)2n2oZ?ZZ*+$qMG zY>Q!s1iuN>Tw%&zLY;20x8<7|Q1oH$BL!7_Bg~m^dW=Aidye8Hj00&SL~7(KCzo!X zoQ$1b!4Z-=y0x@mh(9*pqp400(~XaXh=Tosy>rQ!&bZ!AYZ4c6LypF;H**jUpI8rO zD6dT209iPyf%Q7L`-&c8znZ_fx#4xI+NGu1R2n(b6xVB>77C7{b~w-{9WPmAL0Hhp{XRE9RDKo--!Mx?UVr;u=@q)yLHG7mJd zESp(9Gndvgab05%TW)h~Iaga~Nj8$q7EM+7@*SW$?VDBWhaD{|{yKA?NQjzz;B|+S40qG96-zy4^C2m}nPBJLL%&JR;Bd|$AY0=m zl+xJ&h=^iu_I%*f%mheh-b7Hq9T9>b;|_8Ki45;Kd?FH#CG(GZ30_*(ey@-zzHbS0 zXCmPe${W*Q<>jteK}kadMzaf!+W*2g*UnAm>Y#Wxd6+udL#AzML_53I{MPX&4z+e` zBM#VRelwA%t5Xo&yTZOBdye;4T@`|^6(@Rg6hd7E%L8HkKa7_c29HrCB zdF{_F8%h`J*$Nt%T#Sw&s}YOv*uYdd*=4K zztpY_aq4dy6gSkxta&0jcp)O(`*nzBd%)hTX7cI1q6vH 
zX>yyG>JM``)oj!j|#0c5$*qM53{*iWB#VJ|9;T*bH)s_<-dQ8qTn$=reCoPL3Ln=xc(BjN4m;Y zVR_W9%MwAc1dbPA<`dYRPlvZz$546xSu>SPch&xE;vLF=FMEfby8C#Tn!i0iD;8otwQ+hEK zRaj6ODqjj6x${B(5dj};#IYOGoP>rtK<3Ce!H`*f)|t;;s{e2Kf7SVpbM%ZPJm3y} z;Cqn~{SI5>8u67Sbqp*Q2ndKYZ2Am%aBNTHfEbAxkysZu*USuGS_EP6TMF%Tnkg|h zEEd=2>+u}@GVXiswNp6DRzepTSaNWa4+~hCyiusLvbbOLl@WJvJumMZs^VVo=l7dk z+)c899kOQhsp7uWdsEjT(fSFxKd`B1Xm6vn)j>V!Fh^vp z@X^wakm(@hpm&n@SZo4b^3LM?YX{mHxKcfCJ`;cF?^Ym73NI2qDNH_3<6>kV6(Ucv z5d@M87HfB>qT(f!05ZqFmot5^hiA`gBgr_%xMP8EfkP98Cz2K5FHD$kxp&w6kPbc@ zq^&6%8|+`%vUEqHK8!GcNXNFsRqcovhfNK&=_Ew-an6uNpVu>6wSW^*o4q+=&UT)e zjBfONy_boWn{+xIt`YVR-kYk_yPrA9`$5I^o?+yZtTYjHw+Coq@lApmozi#6~+H`PKyxjDSMuj))~);Pl6Pd|~c7hVog#e8UPaEXQB7>IO-a##)Te zbkJ3_x@-nonf;_E#eodnb&lds4f8lmnDO87OFhsG(-(+K33QPW$3-`M`(ca+E41OX zbo1Lr>Z#8^D$ogw#OI+k_mQvVI>FW|Erym2=+g(oe<-eTaErm;c)6)mv!=(%;UAzf zM(=yS=O|%3TKoE7L9E*NelzQTU!Hi=H$->(d^&>GPvvY_ii*xQP4B+ry?&|vcF$ac z?XX{MPvXwMHlDk=@MjZJu@O`jH#xd0sH_TUDSUc=uJ-bgmh%bpHtIA2aROKy=U2k4D7$WZ9>f zu^%5Wu=D7%=MmCSZeC(v5AmuMWZQ+vBv&p z0}+4I_{TG7P5NHyH3Yb3BfY!b=b|bMQlm_>UzvGO(C&iy6!LC^iOox!mzMO9>6?Gw zwYVFf!|Nnb{s#k-eEnR_8nrR*=Wp8BkoRAA5KqbkAPu2>Bws~prL8~9{95;Rf&nBF zXCJPn2tHEAH7Jg9pVu*u>#@8%+;Y>58&p#hnMP+T3<>&4FVrxWX4bt^_6bNS`e=3g z;|!#Lfj))-#+(O8faD$RhB_P0yQaapxY2M6QTuKGrP;Q9VayQ*A8Q;!R+^n2Phx?& zkvaIM@KR)3bT{t!svD zqi6nYqsi7d!?#1+1EuHho(Ng#jz_On`)%HNem`w(gHT6hgQ43ou;_^*+<7g%V{`6D zEDk>Q*YSNNvy<%yVY%M^o+qx^-Qrp;)UP?AccNJWZHy`NErvbl)lSa3m<{!O zZL>3MM(?uZxgHtfz5d@3B31U&8glQ;=K!69!XLvh_rx)MR;bK02-g`&IAcQaBBOv} z;YX(R6_DGfcwBax!Mhiikc7!!Wtjp*0?0SohM5c2C-j^LpDz?12%J7?F>%|o18xeQ zPCkY2X9f=U{P^>x?ifAMJzRfjW6xAI#d9otG(J0b4_h#>>===e5vR)~FRrw{zk?>d z_l7Qz>!0b(@qvZtyse+dd)xPI}%i z?v(PU_v=#fN8m%n@jPMIH~YsPx30P7_v@@2_k6xF9IqMPu*Q5;>saZT7ZsdZ;RAti zBT2`7xMBR`3=#mIEA3gX{BJqXZ?6Nt?u0aduNd%&;8sVdm$x$B(lgv}vKb{1{*?*t zLqImgCo@xwi^$<0Or0TBWKE9L79!xWlKznV9dc!=5IKdSjd#ZAlaqizC>CjO%nN`) z2QfUaP&bW0R(T=VUZvbg1Koi01 zqfcz=7_NG*@A*6jc+000$UF6>?oQon*O(0*F!O(N)OJ?aSY2p4dM 
z((OLpdLjovwe-?7p&@DALn0|aVUE||9tm}ZOu>o^DEepV2|A*Tx{mOt7YrI@unn6O zF8sR{vtgV$&oD8{rhrHFX*?2oDCNIK+;=_kL(Wq8wAc-Mhr6jM;pVx1{JaL4zG3qD zfOE9OO`E6CsQnywyi<^Md%$sx`iSJS5UxjJhkuD11AD*ewZF%^>(1t&{2_N|=s(&* zi?>B4z46?~PV5=q1im*5P}tXzH{uy@WMdGuoyO~h6K?T~zvr;K()}|HruC*}+6+Jj z*IqW0{l+YUe#`YAzg3sKz~mAM^FA{HpV&Y@5rbKF}RC z$+wRsm8K0tjH%`GPXpd?&X)flqkU$mQV{4n-?R1es*6rN;6o@w4D#$_>4Ep|+#*li zV|Qr-2F(t@pJv&{+pKK#4P8(5>BjPFP{k3R(}Ky#Y#;0U4Yxmqmss^Xfg|cysJkg# zHehY@r;w4S&yX{`ejy%Zf8M^Q4SMjKJfSU4`sl(2&AeVdaZ2Yyd(N~*!WYBwf5^fk z4GUw7ybk!y06@mn87HPsH4vPRRtmMg&A-oy@X4|GaG?)L2nCC<=03|DNB2K|>iN^Q zl{ho2WFxBns;{PgjdVX%dI(@QhZ?G|v96OMFHV(1=(L41nW)JS5E_gj3Yzg zkDAj4m5H3ca*kveRs;O{5EO}M@g{scd)&M2HNwD=a`6(%Gz11{{M4qP61`I<9?@%_zzeAh;X z23HGR;oV_$>YIT${dhCEcIcv@Na+tF<~rZVC5VV+Rx{aK-fW$*tvf{qRHKV96LWw z`=$p@-Y2@bj&{fMi{N6JPoZh@#A7xRL3vKR0_M^WL=6y5#yY^x-QQsFzsT>qk$~cx zqKh%P=v6;(mBq^5|60rD-$Sa|B=Fv^znQVU>B_AVM?@f5x@6$b_u;MS`VDb&JcOO` zDsyX>i~4B>Xv1g4Rrqhj?;ZM6y!?2D_0{o#(GKgHcgK>vD0q%qiw}<6dhB7bimupT zhqH)bc#C4hc`8k=2|MjL%B${hcuV&6_+_=m|DYhV;^oKs?#Ci)ngI~uFGNkQ_@roq z3ewT4j+Omm_MbCqiv5#0m6RiNWBA-77a1()cn$}7o6vxZv)iLg1HS# zD~9;QDe_q?^_x(eiCv&pm`prlp)La%vB%Edy;v*QpX(2afz2~&{lbp8-6Xyo8gzXr z9)5ejL!r!#2(z>1Ed5?cH3RT;;+6aJ^lfLntM6Jzsz<5{Q^t~MYb30WE0?ZvtTtfL z86cWckmG>bb_5$VB@aW@daFJUeK|KGCk@>yW6?VHri66eB@h!gxALW(W>x4nTNPoe zq;AAZ2}@Z0=PzHWEJya6v5~CAUuDSWtWVRtuU|L&U-tfQt-2Wx{Q290@Chwt#O%uN z1_xelVq~FjGx~aqZbZ<#Lxz9TB5%unZ%!p!C6!CT9X-C-b;mff_Cw_jD=`ZqDrM>1 zInGJ?sLB2sY?1nA-DG?A^Z#L9$vj$3&vWOiX5IfU;Jkm5@AbdUAc-CJ*Aer}cOxgp zsJjq|eFUZhyi1Cxa!DkSqg9qb;VFf-K8=5=Ly*e;NHiMT%<_k|dHk zA-N31IT#~kkT;ki-qLIduy9&`PfUN8-Oh;{}FAWU$ z#9M64|5B@4<1+qyXY8YGK37obrj{YE6uc}$ehuz`UPAUSCpqj~Ikx8`I0_-qaq zH+{8)TOQg7i{ax0ftXhq?2fn_%R(BEUT)6AU;-NbAJM1&)C4wQ53e?Tta+eW-8@`) z@(mt_5(>H_ImKk-Ir_hS(4cNH&09rX+Pf;(<1Kct;J&Z8CDz=I zb8o$x_O#RS{k=Cg$hmaE_qTraEp?8yhk-h0>p@nzch)Cb4Enb(1+sMg9m2K!Me`V2 z(?!~BXWQ;a-)B!4lss=U0h>*Yfsk!r(_O1<+-ga2=rneB)o*t<^oOl=tU>Wvbokz* zPvZcmQunuWJ@HL9Cb{(7kMiby8^PHZXMZ419DdynyvDjQkS`vSw&CU1GsjxUb9?lB 
zuXp5U&!*eYt#(Y!v8uLf6VL7UsrMJX%WpK-d$V^}!3r~L)9~QQ`$2DVI*okzH+UC2 z$+A7+t^CR$K(6b)eDi*A=RI{O`gY&*T=x<9NbOH+*B7IbgHMvFOL-x`hJ0ZYKe1yAnFkIPkZzgos0X6QB8o}wK9L?kF72?T`8Th^ez z8L>*>dOyB#E^@VA=MlUqHu|0M#|k*%xTq=ls(jZs(IYbFn!&~}&vKe}&3*8`f11SK zF74CZ$D$wh5x(%dOQ>y4ll~^s3VMvUx_OG6PB7!wx`Q(U_q`iqjRxj07{&1vSlXJI zG&_7*AHcqE==Rx*J&O%tv|}B^j$(RbqjxVfp<-Rn@!M-vmr#6MDgF7O$C4yX;Mj3| zyAq2xq_^go!22TSaR^=9BH{y#hw+6sjb}E@Bw*T7AHNfj;lCIhCt`97A8WJcY;OCm z8shKF_Hh@&mbB}m2(w!tA`SI&KgNAy93r*?v%0x%H8vXaKIOP}o2!11(47TLw68Dh zxz9>6@?ea06-L4=^pPOZ_x-c+&awTQf;#vvYs4Se^pv79&6%EHekI5l6yC88GEj3@wsQR&(Nkl6vikdgH^{~n z*VS)WP}A0PQp9^+m}U01tnPeFSjtC*06##$zemi~=Bn%aaeYL-H0jpVkUkJ8Zn}2`Ov`Ukz;$&xuMgMkdt8n4Uvk~uK9t5aO*aQ<};{?J13$BwL zRJ7sk)3A1SZ1_wvlxh^}^kKJaqZO=x8z2EWJ{Hk0+Gia{j@o76FTtF`Tl%R3#yCXK$1{$^S1jfd%; z{Smy^^^UiR`kshq^1~^J*THVSLDgqOV4MU#m(gT=d%dxFWF`psbEtIuFRYt4 zZLL24HU4euG~4RO0tjIpD*!*407uO+HX!+-q2BMeACt^@B1=2v-0~G{iUsC1@{A~K zh0k}FDO;wSg{kV4-kwPgkttY^&Gf@90A_(ntS~@=A_hVy{O0C~&1+?g|2_ZEEFjx5 zJ)cSRbz@>;>eh?svyY|n^$=y}_GRpd%(b!{ZRdBr@zjap?`&iWup;hKc)Yxa>w=`j=ILi$+pg3TjplzYvSy&4nn!WWI;56U6QQq zwzL*JE~X0u%k7TX9}g`f{1i1TJ>3FGML?r$<>K3G@vbAv^DDWNn_e~@hZKhqoJa7L zspGqK&GhdwI9^@}cV;1@*LMC;v={a~aEOhmm+MQvKaTwaSMQCp&0B`MakDZz zyEj0ln0sbW!MLTlz7*)xP#~rSuFV&xr8Ag;B*jkQ`_O3d9C3h?9dH8`!y9Nui?cxYG!T1jW7G<_k8?CD76V!8&*cDOH$9L z5dn3U8G&|FW^Nq7f?5IDY4yq-i&iCCD z)*j?Xa*MrrL8Qs!GJZNv)A1pI*yo<~; z33S(%?tTH!i&_Rfb++6M5eFwGuJ^-R&b+IQdG=~EH`U&cq8p?!_nrw$+fTf}JT25r zt8tHo#v38xODjG?c+KA45J&^5pw~ii>$LXx(^$o76u3s>fKL$pb<+}cxw_-KP#ZP0 zQwVB0f@rs$D#`hckPHcPbA0u$4vus}^Qu!Pb2}qh86|`1((HxO-QczjXgwBqLix2I z5&?2Z43NU62-+$bW`e3(E9J`)Z!en zu~;>=8SQcTihz={kQyDhu-b)^Aec?9u+?7IH3E&O+fzg_&$=UCR=Bd+BK)BrRqy)e zk`6Vd_S*H-I`LItjJ{x-?LU`W=>zShpAs`qH94F{jkC$qsAWMGBR`p9Ix=pR%tE6B zJ=IOpH0-!CW&FKLL^Tr-YH=HO#%%t@~z&|6)qJ^W#ctUlf~EIk`!qN;~) z+q7q5OD@QF3loY1vIIHn&AW)!&_RX`hDdV+ltlJlr55ZSu9|zNN5z;9usu62ls>Vw zH9zUEtzC>?8++M!`h`+YpA~|f!0jF4snLrYQ0J92dDZYIb<=oYapBoJ5yNhe4~E$ydZmC>KQk9^q9|kcZ`UX} zIoEDI10qQy3Rh%$VBoMGQ0+ 
z#$!jZjGDknlKR$>Ke-fkOcNj8^DEX<(2;vK|fzYV`km+p|O-x!HVLj=2Y zYbnc_l-_-k%Zpw z-_-=k^Ly>x?8g_p3fGXT-xQbA0k&q>Jz?%<=@72Lh3QU}@g%);Z00{| z#K@*J4rgw|{#t(ftwHKj03GyL@Lgc90V}3x?MBWO_iSUYP(^Jbtg?|<9pg|3K zzijb-r+>pESH6Piu{O|sb6TC}=TXD%q3AnvXMib<%jhgR(?kxhAq6(R1_&)qyfB5xvJs=n_;N`~wBEx!(^_9S4lwKtzH4`dfd zZOWGD_nKRrxQTI_y*zZ%c=ycHHISG4sNxDB>9c(0+gQ55gyE>VxaD%6vfwzYxOO?k z(}zgMe8j!)%f+0!Mh?bzGRfgk=_uuFxcpA9kVHZXfv1c)C#o7N55jWCoS zQHQ?M%eI!ie#18nTVaElWFHChM$@wvFVnal)?yU-l-p9AIV)-ivAb@zFHuM>l_U}} zVppSX-eD857TgwSCld+WB?YW)BO*Ru=T(dKHyWJvLB<-gxvIgxBkFTG+FwLV%tc%W<^2YhD&S ztKRJNAdMYiSdvSfQ`Vy;K3P5RaeRr0YNs2Agy!d^aMvBsc=HVYTrqK!s}+b`s_)i+ zmod%6>M*jz>HUZNFkvF?W~~sKu7wa07WX6995)L4GG~+^3(MYV?=*5mqMO z2<+I>817wM!ZsOnSYjtumeY2iDA&4rCBG~^Q=Sc(^y2B%U(LVIih7oVX6o+WD<%2H zmB_&t9YR@2>AlcBEW?F%{j@^-l{JG4XPGZL&l2yQX7aEtw0-#5B(}0EU&Qy9#w+1i zczv{B!67qTy!!K1f^#q)nPt1kd(D4=f_*8EWJXXKnZ~gf)-!Pg!gJ{B%}B*xeGD?^ zYU2>Kgjr;(p^{?OY;FU)Jn3dWalIvHTyT>#JV8WB#J|CqHMguW$wjzZl>zt9s16Co zO^<$^nA%`hpC7yH%wWFD@NNN-SP1E7j^*xWv;O*@4A_yQ%{lQub$80dofYc3y|ir_ zD@MXF;})aQVe*@KR=LrdySsA}>6@EwHO1#Uz;U7KHF=#d2sXt#8$C8dVV5h6FX4fn zwtD6H8tXF{mEFg5WEnl3FhE z+F$n65+>?TL=P^lSpNjUrJ@8K*^S}oNYXNap@-ZFZ`p?f+mfaK zh3A)e>6WX9JdH3LxSDyI5r5g8l#s7igDhpzCr27tYPECf;b(ZhuF$sB*6Ug@!S3|S zKddvFt#&%C&FcJR-rDub8-!W*VRy@Ah5EgbldOErja;U55J#){=V~n!US9*O>O@^k z?{G95kE;F$H`@$t_TpquxPj4(-Ug<6LO~JtIXA?3MG&eQX0hW2YB*vB=mu7|4@;98wuCkvE$RdR3yHfR+*UwKqERHjj}uCCSt<=W}dkAVhFtK)5j2JOv8nP7zD6ios7gY z4w!M&ZOV{m7|0Gd#E)I4*|rHGkLa1J$pDxk`ME^rk#YoAOfhr1M%kDZkYF={#D(o~Eg050 z3S%r*+R>wl@78IZ`6G85U^oo2EZIR1S3!deQN*n$KfYmP3CT!d)zy+e}3Z!v&u&VLbDfqbiEsDMI$aNp~1{O_{VLeF*W|3GzcUa z{hRw8^Ti*6Jx|DnXz6y(yQ*|BS}EhEeuSMIfZHdw7*^ZsT-M`Va@32bo|C8GrUs4m@p$xxk9R3^$mD{wlbqxi>S+-a3 zhQFgyFV$JI^jjKU$jmn%TFrI$^k`!D*}pO!w(6=cWnf4nK`Ab7S}F_Vi<=P1#(|GdBQC7KMK|JY|-eM%)>Eq z`iK+7(m#v zKNY*N9n}?OV7&Ml&%8@ITqx0k2ZsfyF(A<$k)w~DSt@v1nI}C_EC!f2kEVP&K(GB= zeKIa71%->JPS`qx6D7R8bv$YP)!0UpF83|fV2ha@67&f@2G#soj4V1)KN=UY$6Z9B?_J1V} 
zso6){7polG{L+^a%D`xNN4fj{f4dBlBzt6w4S%_YkJmpxd5^{uZ2hnwj&-D@!UWv- z9pdDip5y@ZgyC-0Ii}7e$;p^gbJI%IZv@w`zMx8}UZPZd%MBcNx z{d2aS@f=Gty3Qh@rQL*^Cj9o<{X1bk7bOd`Vvph|*~tZhdeFFLkm1|;rT1@=FZ*ZP z4RyX;Z}DF90+`KN_R3M`*7MlpBC7Z6e#QAV&(g$n*yZrxAq82WG*m@91;Ly>PVqY- zw^Q|h6uZ`IJBt>kQXx^?Fif@DwK%i_q;RKJM_iB-_jwU-py39wBd-~(CDGrw4uZoY z0WqWKfukaH+s7w9i5peMSA=+i`v%I!#m@bjVfZ$YgP^r+p3R>pUhfJe!;j>B%wH_< ziy$442SCq*AQjQGoqr?#>CS&QyiF^MSSM_`c6@K6J82toI zvQMo(4k+iF$am{-?R0hGfLl}%;-7JrO}2bOp@uXO1?j*=3H67t>=W4=+0#u~1hZG} zGD$bPlzoRvU!^IsP7R0kCv$ zcNO$z=$>aYXX8qXoHHh6Fck%1<)`=3qy0`Kf2Ou1Wky+LHK9sYu9@r?f7`hf(Qh9K6r`72;lGD?6OGD$tl_oFkP+Knf&Q* zz0VTtkDNu2B8-(mJ>0XezT_EzsQ8CABkKQkD}nWY|Q*WBt%rH+@aqu!#_b=fp0ihG3Ib3OFblr4EQBLS7K$ zythw7FnvSg7l`+F&Avy(UfR*tL6h-?)J-Gy*fk6-B>#QyQ_a^$aD+Iw7Vq?Xm!Xzp zV_R7&{Vn@A&TTR6?Qz@b9pPEV$b^Y0*gm_z!di7#+oh8EQ;StW zZ>%Ha<59@oEpje5>a!Vt3BG=_i_9SNfazFA(H+6EIbf=R7PWszrdOq*o|NBjQyab( zJF>#RF4k$%EFQ{vnVrgs9(i1^drQh1-fkUyOf;|;su@Yq3%XP>JaajgdHQ+JoDwMy zaDZpP?f;>02od(0i=Zdk__LO-DfEUdBtJrk`By? 
zC6~WU7)eEIjL-9U{5WIuP^=Lk#{D}`b6CaUhpCT~f;%t?lyW~t%}kOV9v2>=b=+R2 z&2oB=PZ<6Rfn-LDTG_hxkg$_}kPOLCslTFN4-&&4GKu|Q>iy%+U)4a|h+;?^qLHfS~QJ$kW4Z5P7YkkF;?#oCF^sLwrQWAVH z-Dxq|^vcy3ycj>18E7xt2tKb<%u)Cq>mKA-@XPTB=Ktd5+3B`%_1ez?!bYgD`1~y9 zLkKWtrmY>HG@$$Ahn~pC!Vg0esn_M(E3iSrsZc%xS`ZkRS&{nAe8ko(=T6wPV=!@O zm!wv8i@3U$#ogkWN^&TYOJ(C&yGk2oJ!*BVPmH`M!olq)bd<|MEP3nXu zz*!-LGeMOZc>w4`Yx-tJFl=d-a{t*oGZ9R+fZM6box3A)StEE6Dm?r3891ARmS z22w7e2@cK3+^2-@vs1(q;8VVil<3Tv8Dgi3?1Ru-)MF7tvo(B=?9NSy41?3}I2hM; zG(SL)FdT->KXKWZaf7Ip(8T}Oe`5A}#PhTy-&GiPik9(Vvd;5~@o-oRKGW`~Y zF=lnLtQ}pauZ%uxPX4i-R03W`KW6JYtH!Xg823T660TlC6BrbZE*PK6ISf^u(Bi)DeoVuw3~V- z`u6G{tF5uvY)>}ritoiB7$b$4bYAuCX6TmMVYWM#4vsd$Ndt!B8oa?iqUK`~ zr(9jLIHCr%^5 zwLq8&y#73yH?DIeU6~g~!(Z##xz1}im9LTSv@V}Mpkg08>HS^^tmGVhJKAQvgUX*1 z`>l5LTUl{9F1uqT6=FC22-{-NGH^h6USnL5IHhz!n6}X?qT(W^8>Icl zSbV}4Dy3wB$9SIbRZUW>FqwAa@W!HjrNActDcBghM#DY;M)%3x|R+4Cb95IVi5(07=~r3+WDyYq8@rp&m@FT42or=WDM zT%Q@{hpc}!@*|qYnj>*Ic@vo>T?@Cit6^N#RN7#L@jvXbG;_*lanp9lcKGX>vdadd z28%F2)IZog89It#e)9CXLP;AjhFL#Ah`GL>7sK|c++TYQSSG}e&X9uUd`##8Tj}ql z<~~*IZ3GCWoAG`kn@w_h-zq8c&34i1b-m}AL)|D2k)~p}_cA!t)S}d18Qb15dlqsJ zxTbu_FrPSXOy%|8c%VnrwJ)1qpRG`lx)8@P9#V>Ap*j3CTaXyTEw)%eI_=aV$>_A+ z5^|UPM88t+<}D4r#Ni!mAl5Q#P=9*$6^Uf7!T0SgD>FeXT9Z<#?{XW`ijj-G7H0B* zd(0pHxyCQdZvp;|!L~U*AHzsD8r;vM<0&{^s1*k1rfQo0rm6GH2M=^GhZ)8H(REV5 z6}h5T8H7R5tl&!#$WQ-tXnzbcoWXIq@iXeae(*0H$fI*Cdxb!HwTI!XKWwX#dOEt& zY%t7Q^SdGd?GKs@HI^!y(5$1A1{xIpc~p++%pfXkv(! 
zgQ6}Ic0-$_UkQL#&KTuJCA3}AhWM^u(&JW+YPWp-nZ+!=DoiW*CbN7IHxBoHYg1X* z{5G8vvUs!9aW80Oj2<2SutA|Dr)xFAfRTK8uz zx*<36&GwFV>e%TF9XimyiM}HYr7Z&}+VPAVI_f{ZJ3|+6vV?b6LrYhq%jb;wm+^+nw!X=|CyOrrF=kiMjk4_;T6^F8mfhO%y~` zilVl=?jMx>o^#9ZSM4)9-KXq=5SKb@{Q{43$bpI4hE9wf!&;c9&*KW{8dUGzKTPbu z(^l{pk{TJf@U?f$$~!N2X@uKjF@ZlNUDh5u5oZy2Wb^&l9bA|mSfqp zcX8o&8!}>Xo3-J#rID`EJsY|SyW)a#p^d-5{i{;<9Ft#5K0e}>chJr8lXd|ex#Z|YFt zwn#s#uc^PiL$g>j@%CUd5Db5^!!P>z^@?=kr{brN`>X+VA^x`nsX;G8X8v?O#*GU(r^@%O{jgi}?MFS^>EXZ|aL1T*Pg+^--~a#>^9W z20z=aQLn{!_wL!SCv+~s2K9G$)&`!coi55GLKCzyZbi>$V=lu)BIi~=2v^sI*owb1 zm<_bq+GEm+BkLK5&R?L>ePx^xAlkLY8cxDUxdNJkAG0%yEH+u!91>qgooG+scj@}x zNQpz04JNPS>g!!z-2NHWK#o)FnTO}u+2taBTJtkK_s#50@@1ilyCl=3VPFDXJfM+# zSBPP*)D`|eR6MWRu-NU^16luNGI`^A_3N&vPWWVYhw+Q{{MGdMT8)>J_trt=vKCd4RBdt%>_7euKC2zXC36~a&YUfS-&|vVa2S4xt1xBv9u+bn~Z(^ z`Vkix;l9U0dyAZ6Sr49u8?1jjfHFi7jj%wYVo4VFzGK_mOYC7~XC;SBoReXKEQkXR zt?|Oab8p$Lhz>A$ODBYPSQvXbvOG41q~G@!28fa^k^+@(jIWVU38T$o_>T;Dxs8~| z>An~@@p;D|#d!CPVR*-+hwH5L_d>6a5Jt9Z_&$injq+~$?rV@!GqCn|!v3#h(A&a; ztLqxYEg5!XuZ&6#&t6pvr3ew?MeXej{#TLXhW=1JkPorHoDLF2kej97(^{Uo`Sshm z2uk3f$z1zQo9F30;*0zKO7I##HN8&-a7~qFxE;LE)CZ(F>zo(K6H4_qfX4Ee*cpcl zAmJP-;WrkLMg_YetFlrB09gzF#?+&wf9L_`rtu>NDFu9mAME{u+KC-z(-* zdwe#c+stR&SLn|W4zO-Sdz(({FJwDH4booJBd5`aW|fFnx8grWaZ0C(mhn6$HUb7U zM`$u_LSkOOP4d{$8rQjYxL2~g=`e>s*8+H2PWw$g#d=z(m1Ri`>`MP`E8k)dFK7V} zv6*y!ji>3^f%IB?VVN6vnFHj6474?#&|?vo0|^Ua<~^j}$B(BGB?3dH2M?}hmry2^ zy({36Ahvs^Bj~j;{Z8OP1u4^9Hc+gbo?^kXvdn5#yrq!vvRTN>1nkaV5@MdBa9m?T zGjq70%d5YCc%hMS_GtvsLq@}zOl%*d8>9Uq@%~J?K3aO)Nt}Le(X(K(O;61`ka$Se(VkR9aR~V@x5=LFn zqE5dowG4h9jt&3J-@U*g5Wk{^qO~sX$v0HMziLE5a>5;x+YB6E>{owSGi*P;YY19z zqAb_?_=G~GDfGCwzv^s4KT4NQ9oO07u#&_HezRhQk^K%aw596emjDIdkgH#6m7jW!+2hksRr20cISgm=33 z!vubvY|YCv070?m3jtHlUmS3G{Vk3Ci+tda%s$#QbRyy%~ic! 
z4>!G68)yYCchq9Q34lRAUyw0s;6k_1=-Gn%_0VVUjN-li$a*Yn?1s6#dLu$u#+O9{ z+Ud4N8=bALJD@3Z~)Ap72u5TcsXjy*mX zZ#8a{Le>0d$)J^sceec^^kC9XfnVNhe}3oX#bTeQol4`N-%*%C2_7e87>T&FfibT- zRztb0#_%K1(y^j6snOyz*`1JIL^C-e78+)Y-hE*aN6FWd7{0$v2Qkv?bys0yV83VZ zjdpL@5RtPFbZG4Ys|?m=VRmf&W?Oo|E#b(iBss6Aqv zSU^2088s*mr}Td1WRJhULNN#=xiNa!a5qX;NEyZQ2^#9{Ot)~Cu1nx?S@{{p_%jmk zXK1;_ZekmG6`E{S&N^GzEcklHb2;7_vb96}Ba`)>pqvtZo_m4>m-wxV?({Chv-{Ul z2{g4fy0k!k_fMpRT@ABGpFR1B#)eo{27FxQb=)}DeC7E`R(-P6vQiTBq$S7A@>*DU z>fVhIEu)6|^9~^-5SC2C@1C_XIwIiw`M)=49w*+FNVE5m6&>|n zJ+yl`y#Ed;7%XF0mxNRhY*7)_dI8`sh%~JTvAuGqo0qeiV7*pD2O?x?ppnBnb?cXL zFcp{EZ0i2P))nMrXP}p7L@+@ka*TN*E3Zp1J>lO)t=~=1oqB4)5PR^ZldK1CS`ft~ ztccuUEpYnR-f@v|!4rLS%^G1%mW^$rztg#XNejfmju1xYqAphB-ef1pc%U4xxY`YK z|35ylfgNSXUx|nInVFiHHXCv}7?uWzQ}4XsL|}qt+0NwL_jx`zT(413mtRj{S@#jX z_DcRfZeB9O0I#(9Fk38`T=jSzV?w&+oE)*Trqnm{BaYx|KYFY+p1y#-4Sr9)U%h+V z{L==}M2~bj{5r85Gp|h7d+ozVp8JbpQ18X&JZj8C-b*npizLM{O5GMfwP&Vz!V>W6 z@z7L9z*}}$AbK}85ngelGB-jS(*_Y~WMrlO24dL~C}FMa5LFEij6W;q)1iXk#@jBT z$@f^FSHbizj$n-;Vpr4!()y)}-_jY{GTC|awj@FHk~J&TEHBGjl(;^FVf+{V4XIq; zcztF!qxGC&IKUNC)3L*-#WKS#oup0LG|X{Z%v{su!?WU@@kWF?_)M;|``uQ^VX_;) zjSf%T&SZS+&w8d9!@D*@P7T-(ga*IUn3tV8qGMk}i} zIDsH1L67IJZDITf^l(IeeB(JH2ve5#v@a{8(|bKMzo`>nBLKmU&hD^%s?dMXG;GNX zN2Np$A(y`yYosH;s4(;CYww-%Sn=)g*^|A0PbIy&t>NjBA>9)u(o4b-T$9Xc+ZXP6 zJK1r&HMe{|ZRW0J9@S*&|BFQ%K=ymyMMN?kb*LT_HBOFt!$9$q30}}=bQ~YY+rgaI zkrzT)SwMf$)H{7%2eK614`P@IkobIK{; zy#>Yn`*TJ+BJRt`i+hn9*u7Wruafv5P_Kfb!=r0aL`r7g?K`u+y)_59x5uRWw$AMm z7z84c@zMmp?;W1RlDB@{+bcpNqi?`70)*fdoXFse|T-GV`h*yVi;~sx!uCI8w zZt~5n(*m!~z3y&tBsP=5#-5x;wGZjqL;(ZY-IHr$7SLAUp*~#hrQ*r6Zm%N+n8sJ@M|sAYZcqLG?u%8o$OSLs{Q4;2a^64pWn-!oh*))}Y0XD0_( zH|)J*0(RX#+O3b{dpr9;dNX&WQa=AK+S(r4Qj7je} z?+Z(O`<+iZ=q!~;eQT-LYRxNqg0BA^R*UU$g{2~V6KBHyV!dEOZ=9RwSu$_J{cT~d zyh5C6;qHbc$%B`XGZNBY08cI&*UpxCFwLbd=QQ;?!MXJJdiS`Dc`=pUNunDgUj3mL zK&$Dm8`TQk4bbf~CwP$Ul|hO!Xoh7~+oznUc)EK%>R`ddW$1=C&Ehd5A{5o-UQm5D zNMRQh+P>WC@W#Rz5eHtQ*E740^`_|@cJI05mzi#z%no0=WFF<#dV9Vl)8nIl>T43t 
z>C(nGI$G>K<#R8{exE_{H^;27JGmX-ypp|6_uq`P=0hxcz*5yB4ka9;j{hjWl@P}=#ZXbi&^9_}DLvNhW^@W4*T#?gEK zIgHCPA&4qobo#2C+#SfNW=6h^XE67LmRDax{uke+LkGicN+-xAhGTOa%(3B6476Nb zs=epeCdiuG-yia6z$~5Yk7J3QXWM!R?e$of4e-bsX73fOCS-j#XTMY9-i>q!ank0x z&FTAWL<@@XphAP=Oh22>>p;ys$0s_wRvMh|4lg_ks-j@JH{4KpIAr1ZQD@9k^~A7- zxpQ}8j<=b@gwq2^EyjLS4!f#8^~dR!$n31ShWGE&YL1%?;o426YQG6qMt_ubeV}!t z-ya%8Hl=}A_Z_4J5Noh?_0kbg}rf6e#YoZ;L1m}qH^LdA{{SykkmI}X%BXk<( zNYmmRlEdML+;yeB`B(Rt=kn@75yh`j^dGIyou&gMk;#oEFw)P+wX?ixO`@>=gZ`(j zY%GcPoOkc6KbvoT$A4AFpvL~a^T-24YW?9EdXn&488zz2>@l}wy>21-HHMO&-ri}#A-O5=xN&NRX>RwN#YpWA^`>a1<2JLsU`^VW zr+OjehG6jhF*(D#A%df{ketR-WZ#qb(@}L)nP+Si9FlGv>Al7chyhT4zRZk+rV zIvU!4t#bUxZkZ1+M$?Z#zao>(vl;8W?xzQ)T147+4Y^E-7dH%%n~jDQ++PSqh^3*+ zwng>3;xf%bl6YRbO1fYq1fLI^>le7*Z4&tJw|?hWh+T841F-k!Q5eNHRN?&Uf&>~M z>`tRBr>AIGo}=4`g_c8Qa+(~l!NuV;IWwuCdOug5w~L$aeQ+RPXKa|fJHE775v?eQ z>6T#(7kP^H>Km0*IqBQdcYBBLZgmli!h1VvGd9Do98d~{*j`q$%5kJKUi}SUj{EhR zl7;d?-$nzMWdWX90VT+}3zO(Ru`$+3XRN-wip(~M-cY|cc9b>!M{avg#&F8E7rOmr`YHyytQ<2cP;k+J~KOYxzH^Qv!<(~sR@*ciRr{(xF z`AkTB$IN8ms-pVA-#ypPU$fEZXyD&c$BNS@9b(f$0lTN$FltFC#B{-8qpQZ2ESlqm z>gJ^;VaK^#co{7>(^Yi?&Z9`l+>6#LkwW!$4t24teMHd+L9BmB#Cq0RFN4!r#`cE% z-ko()mnve;?7L+dSFVd_LTUE;Pvq!9#(W-V8D z?1&lZY+m7=!roS7j*nOGr^v~II66~5Og1XM^LoOsQyVSD;g>1=u^=TVXAK;m(Gu@fv9M{STe@Ig)(&ciepJh2^zhFfm&QZ6CW zsNr!XDjGgyBp=Nr}oqDr6Ry%xQd8+4F#qW~)zX5&c zJV=j~FTq&S8}wcxv(e}F>u#fUWE;O0s%v_2hhSSbx!c2G$qbMLfXAtMoA_~~K5IEyRS@R||?u*IABXzPW(jjCBp8lKpAClQ|s%WIUi(qQeuN z6R^@VeK138vil|q3%#;)3{KAmhB{HmDFspa~U_oVDMmatUlV8%F5Rk&c}7FPTGYH zJXL&b&0`kj@LMMBS8#E*oNtbFxp<1j@un^9Am7%)n^Io;40=-?a*bc9AM?I79+UU( zWxGrX5G?*?d6xQKRtwZ#`0XbcWG|G|oIC58hTKg&GMIP_(r0qnF~4}eIf0{7iCMbB zdUF(NbYkxH1$M4NHpz0_rbl?3M`aSF}Rg<$|B<{C&`ep?#+`f z`67lh7Sf8qJT@nM$${%;Z+89M_ScV3TsyqhfcwWx%(qA&P*!gryaLiSuioS zV;3kCLd?z-Js|||clyB3_^)}LgNUfUJ~Hp)SmOQo&EA4OtVdb-c6Ym~l@X-wH03@H znRIr|x48Gy{vCC#X0;trG_S6yq&R)O>eD9iEc13_9Np?KiL38ILx+KTZS!$d?nG(E zUOwsG)3#|by#l%A=BeBR(R!3yn=**gnnc91Aj>g0zLI1K} 
zI>qd4@W+vYwDC^XVgiX_iipCgr=}SDyJRl$L$A-v?}IqEbEPg^e&hVW{&6*|lYL*+ zg_h*|-e|47B>OE5L^yM}AEu|WU#JO)K~Q2?5g`5|W+VJGAFGht4XVD`9dj4vd(hwQ zU7;koAd%e6o(dQKr*;kf1TZ1cK%65UQwN9&Lqq2o`#`I{ zZ-tzCpV?q1jCqeG)^MHi8#UzX|!o zLSEN(^SD~g@{lp+9x+V=>Ny54cNyXkMh?b0y4*2hNTTerp5#kirf*-MYisypn&ac- zI$t#Z=a=x|O7`}RN`Vb(gb3@U{>_2uGf;g=b=8l*pXm#?>OwAVCareMckeRLq^ zd^*yW<4=cDovRJI(sGD%@ipAtX{uLKHJ%%cpGPj9t2tD=_uOYy`qXrLjhE>8)B{d^ zHJ{1xuQ(lDQc)#0p`e8b#EL$pZ8&ChAo-6um5r`;7Y6XS+~h7Lv5InJm4<}nn2Kk7L9l||JPGzkT4jGd90I5x-f zVC=`fO`v~_F*5gi>;%2AcCF2Nc4Us`^l^1p!yI~7_|zG~=+JTyhsDM(O+*$yA=?-E z%w$gQp?^_lCifI|96igV*#?zfq&`nZt|+*texgGSutHLOp`s=t@|f#1;ljd{&c_3t z1kc=DXA;y?A{k-ds(cum?(q-NoV9f}$do5O`*_xN!kw?LI~kNtqM*t(Ue5RJj-lT5 zb&}qr)A#VLBT;w2(_^Q*P2C?; z$F6&8Xxrxz(&9 zcRfy?|Ho=_GR{$V@2j^tYqhRJ-`(JU7;c{U-au{hrs$-h6MR|N4XP)>jYXTeH;5wPgY%L_T8A zBF=XCt7(HGF4%r{@!Gi%uW@yx+=ih2Q$HANV<(=xyXzfg?E>$prwSnRmu~vSo?@=D zH+F0CTy**5D&BP1F*`p}xjWZQoO62~pRJ4ej;|MK@KlDs=i&a& zUQ3cTYqRg?jRbv~KU^Pu;j2VUa2cx-S`6)T1&+(j7tX!17eY=e(1!IA+>6}Bq<>Ec z{@gh8HCV*I#oEA*lZur5cJH2E#RuQ*?@7J0)=L+Dd<6j6-4D&_wF$aI<1=$HwaT@g zZSm&J?ca*KM3%4Q@p^1sR4}-h86`P4eEf!spxIU_GZ0yj`c6g0R{q&QI}!d6zd)OxG^oBF!`;-BqVwJbj#1j21JpFQYeU~93Y1H9zH5&iI+qLLMU-fyMvZG0gQvYVuf`^^V& z>h}=##Qnxt4wE!y+iqxaKH0g4_H_xfWo)gAr@^!7d2|UEUmD-QkHjldp3}JuN4E72 zC1ZMs@mpSN4+uI<3>=IxUh%T&7Dv_v^n z&0ktzUkq-`F*&1~j6a$6eFkMyq|tFs-xD%}`fcu+uNvF&*Y=!<^7+>1wpSU39{&9( z4}f6y{K#2a?ivHL@NOvM9Xv<1r2A*RwAwsqIJ<43o-tL3XuVHt@=TmN{dE}3F1eWJ zdbqy(T@LU#8ye%x>)Wve8(gli!|~K&A2CNxbCL`(K0AMQve4+KKNr-vR3~xjHnl{_KFEBfbXw}y zoK2pfwA6mh!1Pp4%Zb9al!v&zoO=$Q51z1M-MyXHYBBMPgwoiu5a)aDqJE=O5=tTQ z4SmxH!(Nghq1rPa2v3{AXs8aA@gppzkb*D7wU)b-`DRyiS$!xY-ojxtq@aI0~ z0o-PL-DpS7H!B#?LPC9DmPz0iA^mCxi%ns$!v0zWJNiOqnfrrYRon>{u_^q=yesst#mZIv z{QArjj}w!AxU(x+{#g_<^ziDu>U(G!ofZI+8lG3~*m=X~xegMjanHhJx_o&f$;UX_ z=%MdQpFfqQ0{d}(-si&vSetdMKh!54z4LkYx74?H1RgY0rK5#c{rO^<5MSJ(oR%}_ ztFMeTbR5BU_1Yf|M0%fj&M8~#_OE!v?ZF-o7vO39v0i3bFY57KD|@}alUQ8yg`?36 zqv)8(!%EWHBlXq!v**~m?Mn39uGQ}>Bk-c&n<0V~j!L)jg>R1Aaf4?+f9RMF6h(*p 
zIma(eeYK@NY`fn^Y?;TV=X>LGc;gHDB1cR;Hb}c=slJ(^8pO6^6K2%XaNnw}7GQz? z#2|)0@n+`|g{8UZwMsvmuGerpA_(sIcL5-@)BE3!8Uzor5ybsYiyRKeYlSg@f_<=O zE(ts${oZBW3~|fGx=IST2c-I&#xyn!&fL97DWs*hM^S4 z(I_{onj`T1^=yA-#yjcCgcaY$C%{oQ%7zzU;Mw})%ha_-8hTCKRdj`<5^c=05hReu zg%OjASt>KP8NnM+=3^Dck#jodI=h^<3nWnZYYp0*GrNVnWmN(wqCp}JldpxX(g+qK zqi=WlHX6L)>EEEseClRhhg2`Jf>sNk6|`b^;<<(_Z_v9o6{3$lJ85NxTqhk~?dy^* zHv_GP)PZ)G@iibkAXcpSf9p2wkiB#l<8Ka8(q;sYY_>+oe;3yOO^<|ic3TGbbjN9E#nAf5-8@N52(FthKachG)Q1~( z8)8S&c2`-+R7nI^Gl#wJkoNry<(tCVDiQ_M5rpO3VS>z3_sASC3B6-3jkN|*ZL)}XM^-qj3b`Gf6b zhcWU&Y8TplrLW=P2NY@PkD5TASOxly@zmCb=F^|QmvmwxzAY$jmj}eDE#eO<3`!FS zK$EX`hINtVuZ`aH*jAPv-F>$+&+hVuYw)wdl1Cm- zrvvhBSEQC+;xbWK^i^l9v4Mg^I4E5T>o%yQAm)eYUVcc+98pqe9GgqU9_v*eg}`0n zUi`kpz}L`RJS$5=61tKu(A(YP1)l9VC|;2LHxwc^_bK^fjxK)$ zK31Dr(r&wWF=Shgm~uVCLywZsGR(+FT*c-4mKBY;H-NeAo<23tFV#nw%bw*7Ikw{N z@9pRjmu9#5eRZXQ4wLYVYaV&!-a>~8@A`Ef&Z>wwUUa$2YmBc!xehQ}sga`3aVYGM z(T9MdGp|Bo6t)yhGnswant>$P*@$0mBZQ~RNP|gt@9f-p(6+R6N?0AU60F%_g2LsQ zp0!U|I7`vNy!OAsa(!HRELIP}j=!EQ;|}gI*FJe?gVdG>@!+{<)g}QqaUXIOApIf3 zX2g?vbpj&99ho-Saqa?O2q=MeFhsY{5^M@&7M0EFI3gL;8+0&}2l?9PKWx|xnCe4Y z5#+eAu@>rD$B0SkyZlCH4Pr^Q3ZEPFxV(QDL}K!ot#e|p#0fxACW*sgTdiF98f_#A z2Gbv!s2oa7X#&_Kqx?T{^$5r>o2(@HD)4VFc2fn@1^!JAYW#dR67JzL z*BK92ZF3Q)eI4=^nRV*vfA5z@uO=0jNn9g-dMNZaNWS*F!Fo2I-aS_8?|lwg?>r* zpv1wnsBJ&9ooqGS;`?Fko)-vHSDa#rmfpu)Pj+@>7i}G=_92$6E{1Lyn@{Py22RzB zrH4>1by*pQ%_0_5|D#_KF5Pv#<~Rr=(Cv9?JdnPBo6 zydW@;mqys`XVGWF_MiZtZA3vrbgJRnUvW-wgI z;a4)7xd^%hf@}0&+w?0>dR~hmqOP3`on9<7=Rgamw|$bHJU_e%J)TJP}nx@_!|{?#H@L?21}oA^n)xFnPG z%6qu>zOE#}z`xGrx}9Fkv5xNLdrU9f9MYN#TC9@@?>ZBBi3YQPz0eotFVtKNE3t-d z*simvwZT^on9WBc8X+Tf#jWVSX$DB2nESB14?l99|H=;<<r!IhCEBZts1o_p@!bU3Yn_r@j#1n@|5S$k%I-6*{(R7X8k~-@Puycp z%{w1#`Ja6&=5cf!vN!v#A8`<5wb5Yao*}qZeNJIT9}IeoY%3e>o9jH!HRdD!6!n4f zLA!g);(fy2Kc*ktjj+ywwCvA)4P2Sr<1G`2AFrr@VDp#x4AaR+Q*L9p1=|pihI0P9 z4q8#PW|w4U7es#&1Gy+7tBV)KYWl?DFdjjEmvPO{u6?$!f)_GbmdoxNpx)n5H7Wm6 zH3&|PEW0?m#zp%7ewnlCZ!0prpU*#sediVa|Dni33I3wuap50J`DFPZn 
z_QwNtrPOQp_=n?J#3+BOf$M(eKc}^K)>`moEH)8KwDs-O^@0O2c(OeT#+2S(s7Z z&CDHK^^e-WJH&T?sEv8u;=gr+J%764aA384yMQg}x9KL^)Yu8xv2Bm)4=K^uz>*u`CpFW$fj?(C! zvhLmxp=YmmVBG#+ojB|tG#u~acc{15^3tE-Ir)yvo}jr}fLG4D&+qtY`bBq$Nxrq~ z`^Wt(PyPiYloARV|H76W<=o4lSeSVv>oC&H@-blNHNju&_j2m5cV~7<%Q%kiI}V#H z$bhFNQUT@_6me8{9IKou63t4lQlcWg!tu_TM3PMutEx~GDXi8h(k^l$l$D1&s9dAb zb7XPd5W6r--{uA=A;YWKT?&YcKajv2)66LURC&f%D&7emsV?bJgcl{j66 zz$`mX_584_ulw63C+AWXY)L(oZ;94zTat1lIy%_s`0&VkWLsH&(bM4-8JGnCUf zy9#u^el_^~pB|^4ebDgU@~W!(SyZKXu=P!&Hr%3m|2z#d zNkd{`D-$=*dGpKT+lIB=A*F9ji8dp`AQgo~WQ}E(7v?fS7W(DifZl(0POYzvq4WGp zcsu_MuBTr=nCH8&ofKco*AX!ktQJbjg++mF3mB>mO4_ikAIInR|1J3G?|S*X%>D7( zJxopikUl#~QO6p;P0YEH3RU*&{laLW86ooWZ4HYaDQ$tMvRRU0y1V|J#6oFAh6r~|ZGzB9{D@ziM zG&B??05npoNKjJHKbMA(bwrzBGEyL-hNv1*prD|Nut)zEK&q)<_&(fpKeg$H)&C!P z{BpbAUPCcIy_wOJsq;fl>9)T21#qph-qbSX@+Ev9?*4JU=z7|IZp#0u{lDk@Ho%|X zF5)-+-}^N8+NYiKYFF8PzrXbLzg=Y!alK`lB7!1)U75M=tqVl1r@Q{2@H^+mekdx{ zOERoeDNHfUPvT_b_~%ki%nAaUM}N~_gU|9j|69w&o5-`wBhP!y6PbnyxuaapeoMMl zExk^rm~Lrh6_T=|`%3Ym@`hzqyv#L}9i@X6UU|n9M|2;{+i>LnPu<&`zX$9Y>0?c| zILO|O>CaXElKnk43VwH81%JplpU!rDYf`t6y|wgq089_@LYsH%`p^5k`wBTv;)790 ziXe+9)e2gXqNK4K)3?`0%lt3V-F&-#7p9V?AJ=WZ{An=UzMm60{+Z#I!p8CN%`pCK z*opmaAap?yRTR~drB3-^FMV{|XhWOU(Srsc$%|sdbb$)1$T#k57oD-(2=qm8j?2~E zJ&RU5qJ!R$-k7*NVQM)5V$!wa7b6C?Yfnnv!bPU-F3dKUYY=aSI+regbp$*1!#oO1 z17X9^V2Rna76j=8eVW)O-n>c~2*%hYs`h`tvN%R#)bRj5jd|9>C5`Z<-?-%p?9d2k zkfx*uww$uKAZ+=N zPZcd@i_p5Dg_Ntc)Idd!fgSUn_WSO8bo1NWnz{i$qa+CwfeHMQ0SbMR3NIf&YRknb zOt}B_I}^~6L7LW zf1ffczY4$jzhBqPRdb*F;qE{BB9OD>e@lOc{XM$eFScIU_&3+~*4+MI*}osTe_!_h zZ}!V;t_uAmz67&e*OmRMgcCsq4FRg^_wp}rb>{boqmumTbnq6jlhjojzS~Y-9G3QT z)6!UhtLe#e>h;30#>E^N?5fJwnr`((Yt6Gf=O>Y$_&S(S++w^gGpp8=h-it2jd9an z^^1j(eQF5BNS$>o7IU3W^tb((jE9;I?f>dP_;C7kHbf)N!-nAEv5mIqGtIlenzZ3KEz2~#teB#mM z@S%VFeO9mQc+u9Onza-YTK~!dWqaNKr2SXp&GYO}v%Yq}Ubo)A=Kb>jk#`zKdUQYR zyIk@v<|D`vDcXDKLuLv$OV%@4y?6K3+x~sEzJJ}88NY)$pUO=RxhPahuHtc2MIlE| z4<=_VY`m3&EN+Nnyo@nn2Pq=Vb<%p*ZP@v z(8+;EsC@PHU)K=z<8JxSX|U~zh;^v7kaenk 
zXl`$EGn6q44$I0zS4X2wgR+f!#xIMifBSD(>O=OpU>=~qj>lY8X7_pWK$mQ|dT#0? zxGm^~&ZH{Tiz!AJ@Udfr*I91k4e3T7p zb$8trj!qvaE34(bY1(5cg%Q;=Xr7vlyvpk#8d=v*s4{wLlAdIZ7mk#J5hqTx5q8G? zEB9Hevb*B%CHB@ZD7GilW;ciHvKBH7ZTRg|JSwxpl`|M1NIenmDU41+ck8(dgo6^% z;+Eo@H9HTY8e%RZk{eL;>W-g0#pgat$EAtDo}HGxa)(Lrs%ICERLX+7k<~h_ja3|+ z;tlXCH@-8-$-Py}bVEzOSL}0_!M+CF@Ts%Qxq!B~8R}MAEOSY#y`r@1DhQS9S;P&s zHPq|K<6d0cjdWe2l;c4xb^E)6nQ1q`Ub8xj&8{RelEXz8j5~?G`t>+$PS5)CMnb(U zGkZ}A@s&gd$LU>~)dA-B9ygXy9KF$VzZ>BeznXRKkZLzCp(MYwN9`NdJ0niD*} z+D0Tb(>2LP5UE}m*`&k$dwkp~Vmqm9>RsSBW=uaztMsMnwlc4{a$s!1k+V3a4LPL? zJL_9da5Sk=_a2#7Bd`(XIcz+%;?y&ko82S>q(@vdBM`c&lr z=Qwy6Y$$O`d3&C;lUY&YGfuk1>ZrSEk*i{z*Fd}m5HAFoQo#ZB(@J{IHnWpDd1SjZ z&yAYjG0V8oo%u}lu6LNqlqOkBhgX-^czfYV;>=2xp~unknxj}vCq2}cJuyQe+}U-# z#~{o)F`ALrWNi52=L1WRhD6Z|$13mS+;>G@;)LMFnZVZ2Y5`AdL6+(xsc1Wn_lrhP zB=*^bK#IDPJbXEpqVL_=ir(|p=RB>JX+j!h^Qi(3ycu_qkG=QOqeA=Jhby+fYHB!8 zyRwVKbuq*Be>{8Ori?Im!w8WAAdJZD_3E3Z)`5VAQ|h!2l4+GKnU46>`3)h2t1u!= z>kMhQA$ywL^Q`BQld+N?hXyZugnkrmRvKyw#lN7Ck zLS>SLhqg6GH|A#x9;8!*PkLTW?$!UoW3YaiyjMT*_kno!oSEp!6BzRoIi{r4&3VIu zv2^77yh~yQQK9P|uC;o-+_rx3`@J?BC1MJJhF=D@+^LPYIA_Zp-OX-JVU5D0d5y2u z5yzG~&|>H7c0H_lgsFbEL}GW4{Ys)9;pEb1LhuuMHqBjLa2Bte{pelQWl0o7Hje?_ z*JZ1G-ePYO&Pk5n%8{wfurtaE?iu*dI$_X#_042KoNU=kuzLL~RJW-b8z-GM^Espw zsBAf1F?+MFu~TTwh%#Zw#Le6u^$>G@KCvhDXUL=FIK!L-ao;gN&HU6a=U*F9xYr!` zT00)WC|^Hy6uxIod1!XRT2pbsC;DcYd_`6lLgs6{y5IL>TAP>a4r8wT zYzzaY8GH5dLF_0d>6#9}d75|<>%tz8o`u(qp*_t;USr!{vxTWICixeI z7*8=GVDkPlC)&+T<~Hgi?xRxI4|wr+L$7`fSZHraQw}zLM_4m^rh)!D#Rm(MuX*SG z`e;+P59mI62HW)V>UY6iXXOKp4%F##{3CZM+blY}E0fgmr~iL^!B9Co(`0d<6Zbaj zp`3)PJv3nIVr5o~JD~Yg(x}Jss^?i@f9WbS#`WFDQFeo#z5jZMu$-&-+jxI$IS65; zu3T5XpIE6Bqox>oKD7Bf)L#40-n3^0-9#_&=69*btMLYL)3oX`xCt}Xb%AR{lZ;j(xL=oR)75t463hWrJE_DN^ za#MMzJ=`||Mp*mw@#tl!xL&nBjd|kbv&3KZc7idlBImBCn7@c_0;}KoUP!22YR#^) z6%7r&_utcrzs0ToneH}w$jS7Cv()!6F?n-bQF~*KEj<<@u!mCM3asA*y4QqUx0#K!v0=cU1ftZDuYK62KRJxj$m$@^mvj+J5CbjQ6N&Iio2HbG9-%lK2G|YQ_ z+cvjPkU#YN_NV^)(!Z$dFH*R{hpO9YN%EPQ-4UrSmu0P^hejwLnpPOH?oH5FNm 
zU|o>fS9oE&v|=kOy<|o+#}Sowsc56zRQIa9gJh!@eQujuPWfF_y_Yq4botBX^IJ0T z+Qklp3*=pn^?c?OoVMl$W>UEuFhuE^O=LK7aR@+rE5fzydAg)}+)x?e#<-lgyhtUL z)?{aTffoDL^*YwH^4OL{ZL1QW8#J>iaenWcbyVvt+e+pSlTIeJXxhDF%do~U{haD2 zak`nEpJ%}ME6k1>&U1NH_ndhiVgy%ROL?KYg`A!xqPuAeas2K%BbmoW`}(YFn^)=N z_0((aNM+-9zj4-e>wRYAFY@R9zxNAU{x<)J3f304L<{$gV9vdI|D{*VtDl~K@cYem zbBr_yR%Z>6XMcw3`H}bH+po(>=0E)9zK82P&ctB#-t}&NugJ#R>NyTPpup!z$j32$ zLMGVLM|x0jaMOkVf0%&(^6OiVKkMKAo_`-p*S|bqhc-HYrAfkt`L<^-P){-b`?oOB zd+7=;ds5NAF28#HSK;*@X}_(P^<``Dt>!ylOS0xS)AkQN)8gTVYtPp=)~{8yaXwCY zNuxN^=V)uKJaFFmi{@WTyl*6X{cGNv1A5G-%rvj1?}7QRiI(38+f|3X{&`=TUnL*+ zUu+b@59iO)JHBJDtm3}uh8_&m&T`U^+Pz|Lg6elvm)BQm($=2iC*OSH#5P6ZH?iIE z%m}Ncm^s_r(`B$kg0DAzXXh`g=IeNu5Kbdb?|o&ADDC~jk-z%4Sd>4@q23h#7fjO8zKBs&7occml{&Krv5`L+5eY<|EvLn8kbU#+I9Q=2-v~rIA3`eW_ zTUu>bp8ewSj3xSd`OhIp;Ld&EeD65U4EKl=<~5iEGsC^HJY?pNcex+kXE}!yemNLnn{sbLrlH zDK&q;FX5jwbsh3t*IPfO{e9Q{|M-7+9ScA}kM=_SFG%7~`AP@i@!@c&BXi~46XUUot!$hL5{M+mhXpWtn10ubnpwVLiftJi+6BZPIvj>yn9 zlkg9o48@JxwtHH`<s;ij^Fx5p*2234SCIjv5cO&=5U>f|R;s&WT*8AOSzgc%8GSk%;qVp( zB4v$pq|K`m#V_A*cYQL5J_1=N(kEsNU0Bqb6ClV;lUu6$9^?2*pGre zT8SdVj7ZS{oSgI9YzMeuas!(VzI3oLYzfI@z34xi95&1Oi)bD^@5hHE6#7Zp`g79d z1N|jKg!8;TGt;5w^DHB;-?kNgs3$ z{^9#|_w8!^#yxty{v2WOgZqETK7a90XZ!y#4@RFz*YY{y%BtT>QJ;{|czDQV?bD7mvI% zOWj)zhj8=k{nc1*&Jqm2_ZMT=X*A)`sAnkHs<;*_UX!OK!V z#{O?2{*!56`yGg6=H3@_jWZr!k|VHi$S^oz{eM~1l(T%b;HWR@`?-c?1Apz6Sj}S6 z$X^j?GyGN++)P1o*JDnxDgLW^oY4?+(HW8B{kp0#zk}vKuXlsaPvTn>q&&YM$gYJ- zmxa>CoQt5sUH(k)z+obPO2+KHdMs9z&GfZAU_ z{O`~BYpfi95pp>a%TTxY zDl`A(o~NkCPIcq^(80r^*k&He=7jqYIpQf%5{WApnam7Up`k_{))HrW!e^L&JZaGb?iwf&XVurHo-ge9K#s%hvUZ&Dl$bm;p~lE78U-i97$Y z`_Axt`N7b-+P|d$@MR5i7Mv|US+ji6jL&10H(TBrn{`>*sTYG}5Z({O1Hh}R*Y?Zm zSkJqw_RTrQ+&gj|uTonOZZg-4Cn*9kM(m;GG0qS=(5kvC3Ktzc#fR1I$~2aTC78V5 z=LVcP!!uy3-hFo&GJ3}|UCw<(Hx^`Uvt^@@=Me>-ChYfj_IKthcpUGW>&nKDY>5c> z(Z@Fw&eO`X0!airte7%))|}DedLf=Z<_o`YJrec2tIsw@Sbm+J370n{)x?*3Ct*Ef zmDY0GX(I1sY)R&;a?7rf%eO0rzuk{26ln{xduoFf9?PH#}}yYI2PmD9jfZtZZc`Ode?d70A*W}yW`WUNaK 
zLq%P$d%z*)8?1kibMIWeaXV`)^hY|*V55&c__$}yQ!Z!MI_|4G-r0lT_2OFcA2>%; z?;0(Mj2w}}M-#Qk5VJjk&8CUv?s?9x9hvjpzk9(nQ*vhvd($at)E~#8bVtF^Pc}L`sjIBT+MG+@74x$ zg39pS%w4dhXWY9^vrs(NSiCE`eF7Ajhq@iW6w(zst|kjF7%MBo&Y*8KEiNqB|J~iq zUx+!vz-~qz77j|4H2F_W%83iZQuT&@9R5|PvVnWr4%Ec>?wW@mm0cP>bk^CdAKpUW_zap$=Y89HS;3EbMH}%NW}S`?jK9by~k8y{>tWs)!U} zhps0uBGQ=%ehGmPeseTTT@1ds{qeRbxpBMBuI`;fZ;;Cw zc?O=aaTvWc0{6^1e{R`ancTw(zH-~#jEJJ@P3?xmjc2GvOYm z;ZD!guHKqW`i?oNuOwWJ>0H+}5a~-!U3rLiV`-yEUU=2u*JZL5pPCZH96OLB#QjI= zUajjan9c;A^!OUZm9bF`q4M%YX9F94En&3%YPNjOTy`T|yPU4;b`9giVh4qEFExpu zDft7G`pwUf`pLwy4ngx9J&cpv2O{}wWy=$tX8-VJF^upvtNy4)%w_Ondu(O9#Wnl% z;k^y&+~>B{)aj`^c-`-Rs_Z?mNuCuuzY(w@7xJZ9N#<~8uovG&^`nrA9 z8tX}vp}vGXZm;fnLV_DdUM#cme%P6vVNui>%g;zw61s(5JP`pSj+{&0KtA;)nrjz< zSA64rH*;yezMW(sOw)I*JLb1MOtYdNH_5b+GGAB2$1=?+usH2d-N#Np)XAm%aao-A z`up|s=68*2M0@_u`ndXkao)F_{~f>F?;REl-O~+qUEC!X=8kAf=CEt+ckcnevQr&* z|C#5J*&4cTGaaFJlNT|V?`>J_&1Aq8bmJQ@_6@3P9~vMRoe8N z8P=xplUTp5Hs^Sh+2UrcZx70r*{3@Bhc$Tv-8(pw}xiz zE|*2DMy01hWjN_^^H}BZebx+NL(}tfHs=c8y6=B@<8Dg8PQ7c~h?!fA#AlbM{V;#b zkLCdXybpQ*Xdm(*fJvy7{`5oid_C`9uTwpJKB?kARnPc8><1s``Sd^fcH+tZxBY+H z{&)R<9X9{fh9B#;H}XI8a3A^m)Bdv&P)LgrL5ODF`*ZnU{dSxr<#NMbwEw(x3;UIw zg2lS&o2X)Qq`JFw{$k=gG{Z;TKwtln0C~;!>|XoIb?bj$>-)aB`u^we%MGnR6+>i( z%ok*-*R10XBI>GFS^g|5IKrWuNk4M~gvM~!cNvIR`DG3gZ0}3Oxb$Z!zEJTh2l|@B zUy12GKEy6w=X>+tcjMvz(VeOm2l+S;{mcOV$I<`J_&#K^uIGD+7oL(Cr6NP2ALSa| zOqHM=(;YD&y)q+fPki}OH>02Sr^WH@pBtu~#OFu->500ai5$bwKK<^Njia z?;?&U;~boZcP9M4FMnCi#FZZ9H@bud9 zwB0+CC$j5_4>z>up4%wz1a#li*x5&S^To-TNs3*L7Vm!}Z->y2TOJzPjhk_;&aicj(2`^}D!*cBt|jsg@_AvR&sUE@_*N&ZOQ> z)m253y4&29j8j4&|4^?Y)gH;i~4@uK1%$exB8@5Xxi-%q4i7Tz@$yn0#b zaJ1odr(+mx2<;;ZAUz)W(_XH1I-L|wjE1(ET)l0qH8z%aanyR=F67r5&sT0MomSRs z7m)e5_T5tN7w3EV|9$aS`_4G;5~8)Lk5rgZ$7?Xy#8$C;vktO+!;={FYE<7?%VsNV zAEb!rjdb_wWpfX9Qn9?@@A)k&8n!cju!df5%;ELWY<+jsy1r^4CDU=4nV80L-S#3wy|QwW~N9h_P^!&8$bG*KV$v>?f#v2|4o^_|K`_D-cLNfAH#F~ z(b*6EKd$b;j)i}xfAzPS?)JkzI|2X&TEGtf`TKet0}yF}&;IO>fy0tO8|VH*U0!Vs 
zZX!b28~I)V0g~f2krgurPD3pe1ZfpPsRbk<6f`1>u{sm~hyL4#c>x`!WJDodk*aio zDkV(2ym?z{~c%8MwY|KTwpWLrQ|up3dODxaTzkpB;L_RQXM zS9Qxmb7DTziUX^)4-RBTx>s?HUKkrV6mOpOuy4&L2pn;F?seaEQ$cNyg4WtrS7lr% zxgMcaWCe$0Ua>rcul=!;88&aNG&2#m_4Pk4_RW*`AEm#Y+9V%3qd!OZGERs8x7kZ> z!-P&v58lktBP zKYkVpvLc`>jbeQ67#=@#hpo@ZfGQW#E4T_88!#g= zN>YKMfTEa`qJfAXI1C}Rh*B9WNHGaP5JV*fG*Zw}6HO>%RIn5=L}XzZF$+me076bo zn=a4Mlt*%D8L=Uybd!l($XhASoz#e`h~dg*T&kiXg6t!zP*50(qJYT4s#++eq{$j7 z1Y*#ts)Xr;T!a26^gq)!!uAJpbDuV7r!J95VJ4#>X=bQSVc8iZImm*ni;-Q01vOYv zP0rF_N1O^Y3Mq)H2x2KIr6Meo0IFM9>O`c`2})4prcnhY6wnLK(i%<9IW|*eGSmY$ zP*9>%bF_d=igE-d(}k#mCH0+3R^>OW2%1o(GHX=KY1&#RLMY{S)3yHH_&z{Y$BGFB z%Lr_VGqRF=K$+S_z_JUAm4xW$V}X#UwalaeNGwwtfU!hj`cGeHAh28hLf=x$>EzJ6 z3;!X^c`O(fAe;o&bSQkkN%Vo&^8?d)&2#UQ<8RaUr@ya1O? zTu3NLswhc?z`02=CTjygEGsxtPw+xeY9gW;swAq2iKvJ43EV{L115?E$P9`aR*pJ&we-8vnQp$lO(9frWUeP6#eNiSSB$@Q3M|p z$t5RIlojfo7$_*7;iu2P{u=jhWlpuY`L5R4O6gB<6m~AH;{upfWA@x(& z#zS4Y6|7~n#01iuOw2|RL6eDLE_>MMXoxRN$ZCj?ih?StAtZvI)??7Qp;7*hmxyLk zW)+Lo-}M8@f-p!*c<`g@Z$-wpRZmP0=P*w42zPJ{0_`yeS_r`q%R zZfb@DKiJat*yZ*7a{c;lE(n|b(|T7rzrkM4^I ziV6suC@8w1Sv2jxy$d4lrEsT^T_DYbOh^70`?2|7j(!gv{r<=6py#Jm14*b42*_f> zgyg1#Q4tqtaj>;gL6S7nK}ksAc*&@snFyK)r$zeP{rSVeAn`cD=#-+8rZB`)hN@ny zWo%JYL;(F6(Zljj_8;3n+9GeG^Z(i{B zL9`lLDp4do+0N6h9ZT)^wU;MlRNM0VVY4p1^N{HE*QLw6CHCHH9O>VqY$}l%{j8qGbWG}V6&cMLH$Y9@~z+lM3aO6MGhhW diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7102c7f297296821114661e00e5bf54d0891d105-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7102c7f297296821114661e00e5bf54d0891d105-21 deleted file mode 100644 index 5aafd5aaf2007937d5767840462fa75d5ac898d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 76 zcmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%pJ@}( diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/71a24ce771fb7f1a4163e57a478c3044ad42e62d-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/71a24ce771fb7f1a4163e57a478c3044ad42e62d-24 deleted file mode 100644 index 
b80bd7ce35ccaf4f7f087a3d6505b2ccc938ba60..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%<%s|1A```SmG`Qh6V diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/72c738d7492d3055c6fe7391198422984b9e4702-32 b/vendor/github.com/pierrec/lz4/fuzz/corpus/72c738d7492d3055c6fe7391198422984b9e4702-32 deleted file mode 100644 index 00aa56e448e13e99007baea3f42163872a88ef6e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_q-%hCBuai99BT|Bej8Kmnj&3PVH9|NjjP3}8Yc LPZmiSo1Q!X@`)8f diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/72f032947602f1be74f01c91165c5118121f36c7-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/72f032947602f1be74f01c91165c5118121f36c7-24 deleted file mode 100644 index f7f621e0abe3fb320f9ed6091f8a40f237b3050d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 wcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%B=VRT6c`zV85kNEK>UUpkT4TeP8KMj O1X2w|5E&Q+RR92}bsRnb diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/767d1943125a0f6e9397779cc757c9cdd1e05631-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/767d1943125a0f6e9397779cc757c9cdd1e05631-17 deleted file mode 100644 index f9a81c35d8c47fb38cb61f54480d3a56b9239a69..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 62 xcmZQk@|DO-Y&o%a|NB@5Mg}GZh6V`+VIWz9F091B3ljeSzkz`POi1K00RUEC6Gi|4 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/76d22068e2ed4a5952d4adc7ea8dada5509a784c-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/76d22068e2ed4a5952d4adc7ea8dada5509a784c-13 deleted file mode 100644 index 0d9259c3818c499e5cf3adbcf3802062414083f4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30 icmZQk@|Ey)U|?|4^5tPrXJlhwXkZXy0P`gno&f+n83b7X diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7740102922cb9933980bb800c1115daf38edf654-24 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/7740102922cb9933980bb800c1115daf38edf654-24 deleted file mode 100644 index 228afb56fb3fd900430a8df0a0e3ef548f4f7eef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 72 lcmZQk@|Ey#P+GZn|N8=^=qn6Bz(TZ;Pi9F8P$Li}0su^P8GZl& diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/783270b1e353ba3895b7d0c4135b8592e22f6508-12 b/vendor/github.com/pierrec/lz4/fuzz/corpus/783270b1e353ba3895b7d0c4135b8592e22f6508-12 deleted file mode 100644 index 148d9721f9f1e8116472a10e82a06f0a5608dcdc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56 ScmZS4@|9o!0u~ZzkXitG!~~fD diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7851a406571c6b4c1aeed0af16db8c48444c3f2b-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7851a406571c6b4c1aeed0af16db8c48444c3f2b-1 deleted file mode 100755 index 2f6cbc18ed18bde0aeec327a70ce1f7c5ae78177..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 34 pcmZ>Y$}lu^j8qGbWG}V6&cMLH$Y9@~z+lM3@aUb#axNi;cL0~+2}J+^ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/78981d313038119ac4f7017349e50a1cba56b382-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/78981d313038119ac4f7017349e50a1cba56b382-7 deleted file mode 100644 index 74d78e493891c9eddd04b7884d66db2198a7714b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 ccmZQk@|DO-Y&o%a|NB@5Mg|aIjXlEz09d02pa1{> diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/78c88c4afaf5962056b1aea720509b9f6f286b91-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/78c88c4afaf5962056b1aea720509b9f6f286b91-15 deleted file mode 100644 index 33bb48d49ca59524c6865f326b38abc8e0dccb62..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 64 mcmZQk@|B28Y&o&_G6MqxBM>t%uz-am&I@Y6#UMi1Q~?0N0u0>% diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/78e59daada9b9be755d1b508dd392fa9fc6fa9c2-27 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/78e59daada9b9be755d1b508dd392fa9fc6fa9c2-27 deleted file mode 100644 index 21ad88cce77ff8addbb52f93fa123ea9c6fda6b1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 156 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-Kx!0P`02Ww#hawWh5HDD=;JO%~@QzoEE4K+Zc4z>% diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/79c5ac978f5aee35e123f523369aa46b1d0a995d-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/79c5ac978f5aee35e123f523369aa46b1d0a995d-11 deleted file mode 100644 index a5d3bf48c3046dd8ff77c573984a1ca76ece939c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26 ccmZQk@|B28Y&o&_G6MqxBM>t%uz-am09F45fB*mh diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7a0fc8dacceae32a59589711dce63800085c22c7-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7a0fc8dacceae32a59589711dce63800085c22c7-23 deleted file mode 100644 index adcd6885aed71fac911c112900bd8b2c30e95006..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_q=5{~H(>B=VRT6c`zV85n>Zpm0MCvN~CyfD%X; NECbfiPy?o*ngK+W8b1I4 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7adf4aa021efaa953268c817467959fa3c42ca42-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7adf4aa021efaa953268c817467959fa3c42ca42-13 deleted file mode 100644 index 5959a250beea437da8c5b7ba9757b329d3e28d44..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 ccmZQk@|Ey)U|?|4^5tPrXJlhwVE7LP06Q!Q1^@s6 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7b8c99ded96973a6e8f523bc1c6ed4ef5c515aa1-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7b8c99ded96973a6e8f523bc1c6ed4ef5c515aa1-1 deleted file mode 100755 index dc224e05..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/7b8c99ded96973a6e8f523bc1c6ed4ef5c515aa1-1 +++ /dev/null @@ -1 +0,0 @@ -BZh \ No newline at end of file diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/7b919213d591e6ce4355c635dc1ecc0d8e78befe-30 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7b919213d591e6ce4355c635dc1ecc0d8e78befe-30 deleted file mode 100644 index 2c7bc27602d32afff966248dca4b54d84b84d19f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 66 wcmZQk@|DO-Y&o%a|NB@5Mh0F6hK3qs0Rfi_@% diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7ba80199cbce9a2eb47da15f0c62fd1fb8fa67d9-3 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7ba80199cbce9a2eb47da15f0c62fd1fb8fa67d9-3 deleted file mode 100644 index 7a425b8a..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/7ba80199cbce9a2eb47da15f0c62fd1fb8fa67d9-3 +++ /dev/null @@ -1 +0,0 @@ -*M4883 \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7cdc0917ad63ce7a7c98301a366c31635f0f099d-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7cdc0917ad63ce7a7c98301a366c31635f0f099d-14 deleted file mode 100644 index ce78b27630ffd1823dd1a4a10c5fc34ad90b0080..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^$nZap0RYCu4ATGr diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7ce37ad19bfe9f52eeadda03e6b8448e5bf57800-3 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7ce37ad19bfe9f52eeadda03e6b8448e5bf57800-3 deleted file mode 100755 index b4ceb19f0f0e7a81ccec898d3f965f0073e78c8d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1452 zcmV;d1ylM$T4*sbL0KkKS(G)X0|3c~|A2H5p~Qdp|M>s^|LQ;g|L|hUKKsA`6YlAD zc`I*Yq>cap03SdA000000Pe>@o%ZE+&|9wQdZK`RbC%m4%MG;|_Z&S3uFrNr00Yvf zPytkx3ZMko01IFM1MbBD=%4@+NF%Ic3;?8kv>bPxFFi;`@6aWAeqKW_jHKKq5loTBl0MJoVC{aKaD`r6|I>_9? zM-U21B2p*-P%g%l=nSX;C@ZZ12HN{%DIf&^prud%0HT0U0)POJ0HHt>000F508@wo zs-*y+P(lKPiUD5i0011Q3ZqJiAt#e00)2o9%b#m03!C@cI|cX z^ewwOgP}kG00jU5PyhfF00000KIUddWzUy*zYj(XS zo#2k@&yew7MfZI4zGbj5%K6?ETfXnR=e?&s%=R5=*Q$L!xO%M*Oy+NSp7(1bO*@-! 
zeNVjg!`Ha__?%a};t#KV*Ig4ehLd#HZe5%WSj~3RyPd!h+Oykk-8A;c00%OJ&5)RG ztzZTyDyoWr1qzf8ce-*`#_b&`~O?by^sztE~1O3vuo5Rh`LsGW40~*GGGf zPkZB|RQR@KQ$s-R(QfYVxO>gByB(iRb4rwhNl&iel{AKY-g)hu*{4F9(z?@gEw^V} z*k>>R001Z(;r99-PrD}1S6tohk5Or+g!>WWuW9l?;k?@!?(**4fMbO2WQ3SjEY|`r1R~S-8op4hizH5 zZJVK|2rd%0M72}MN_ly03Lu262{qixapuq!IDEzDy6p7ni5vE zvAVms$OAwqHg4?L0noVH4FKD=8s*Sk7t5?i9^UP%z3b8Dbba^F2hWYOH{EvTcYP0f zCNbRm>+Apk0000!$C&xIS@S)4>^Ugzy;~{NwKG>SO?}u?-^ zYoHD4qPvZGO1tfYRDJWhUER|;dA-{3yPL<#rBr|b$7zpvoxMJNA5aw?>zhca6KxH7 zeeN~f01B<{6~4WFd+yyc)y>^ERz2)Q8e0majit0`DFGT1=yEP;Q&{Xck1STz7;T2u z0ZJMhd8O}XJH_ForBPGT#_v6DbiBKCLidMCgSG(i-E2rb0N1CX>GlKR@1~Ri3i+WN znwxeQcJ1!PQ?Wj}`p3q7?Fqc-TjRM6 zn5%=UyEmMpdvxS-dE2MDX7*z@x!-N?yRB{&w(fn`nmcoMZEyep9RL&n4u-0(==*Bv zZvlY_pIQgEv|#k_Ub-!sYV3F3-F){w(DUYj<|MOax&;6R<#2euGN*3La1^9T%+}iO z?G7oq_keq?2iqmq54P*O0YFdy2J_0R%t|Fb=eN7P+2?uNw>fR*#qV`Zb#{ty4$bDO zS@+O4zTdH@9gy?Rx_gJm2Ww{7>!eaL;wXwQ-&hMT*9zW5v9^Y43I zxLc>YyAMoK?X2w8RUHV{rRmw7cYOdf3(p&a*_F1J(>865RH?AlX}fjRBv5yydcX(UP7O~8Ktd4^K!6a_AsGbIYMG=B28i~j Gr9DhoExbMe diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7e3132012be223fd55e5e7a7fc2ea602361ed2b4-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7e3132012be223fd55e5e7a7fc2ea602361ed2b4-5 deleted file mode 100644 index 010f13fc267fad0d5cb5034505c94c2a6c84acf0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8 NcmZS4@|9o!0ssS)0F(d# diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7e9a88118e4c41e61f5c501e6edf9a5bd2432be3-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7e9a88118e4c41e61f5c501e6edf9a5bd2432be3-23 deleted file mode 100644 index de4f0c660699169e1b2a79be0f8c16f4dfb89dec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 135 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=IQ;0d}Sp82|tP diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7f081c89cfb6344f4aac5f813da1fd15f8bab022-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7f081c89cfb6344f4aac5f813da1fd15f8bab022-1 deleted 
file mode 100755 index 16410b7f524dfc8e7a3d86f278ced1f5d0bcaa4a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26 hcmZ>Y$}lu^j8qGbEVaDOz`(%B(4fFz$Z+IPEC5|Z2MquK diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7f8c3b163798c8d5e1b65e03f411b56b6c9384bb-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7f8c3b163798c8d5e1b65e03f411b56b6c9384bb-28 deleted file mode 100644 index d4eb29f0931907f280276380bf86a7a50be33a71..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 124 zcmZQk@|DO-Y&o%a|NB@5Mh0O9h6V{kLrWtAOCt+QQ&R(T1`yx{;{X4_Dwu#w39vv7 tNQOZokAXqKlnJP!p$2HuVVFrE0g#EZK-wOp9<1X3{{{vIqZ%;v9{{vJ9cKUl diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7f970f16026c689c096a19fef1a3282a13ee69dc-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7f970f16026c689c096a19fef1a3282a13ee69dc-20 deleted file mode 100644 index 42795e94d20f877ebc25af82000fe420e4319766..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`y~85kOrK*B)OPy?2MQczU@ DZ6_1x diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/7fa96d28faf45062eb803ea84a334b607e966f90-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/7fa96d28faf45062eb803ea84a334b607e966f90-1 deleted file mode 100755 index 28962b057cd816316e88dec33d065eca5d1591ae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31 mcmZ>Y$}lu^j8qGbJf6@R%)r3F!otA8(4fFz$Y7wrdY%CInWj8qGbWG}V6&cMLH$Y9@~z+lM3aO6Yz`v; diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8261f0c1799ca71c411f6d3f34069b25dac8b739-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8261f0c1799ca71c411f6d3f34069b25dac8b739-18 deleted file mode 100644 index 590b0f110e19f85a6617943649ab8c7799a14da5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52 scmZQk@|Ey)U|?|4^5tPrXJlhwU&j9?kU#SB{{m>C!vQp6qu0N4cy!vFvP diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/82a499521f34b6a9aff3b71d5f8bfd358933a4b2-36 b/vendor/github.com/pierrec/lz4/fuzz/corpus/82a499521f34b6a9aff3b71d5f8bfd358933a4b2-36 deleted file mode 100644 index 945f5fc94157b8ceed5f4c3564c7d234934305d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 rcmZQk@|CDdY&o%a|NB@5#_hZe3=K6Bd2j&+`)!$b8GwSUv1gb7cB~L5 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/82afa534de59025bf1e3358919286525ae7d3347-2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/82afa534de59025bf1e3358919286525ae7d3347-2 deleted file mode 100644 index 2fc2c285..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/82afa534de59025bf1e3358919286525ae7d3347-2 +++ /dev/null @@ -1 +0,0 @@ -*M \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/82c627991d65c5c4e88c9ccac39be082cca40765-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/82c627991d65c5c4e88c9ccac39be082cca40765-24 deleted file mode 100644 index bfa3172a74ebcf89888ddcf89a21bea68389c4cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_rbL{~H(>B!n3lfD}-mp$1unEKop+0ab?)Lt%NaQ89002kW1&#m! 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/84a9bda8369d33ffe0d6f520c24331ae64e9dc88-3 b/vendor/github.com/pierrec/lz4/fuzz/corpus/84a9bda8369d33ffe0d6f520c24331ae64e9dc88-3 deleted file mode 100755 index 12276fc7a588dee0efaa7cc1c0d60fa2a0bc0367..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10 RcmZ?L@|9p4000SA0sH^} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/86513e3435adaf7c493dd50eb5de372010185e36-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/86513e3435adaf7c493dd50eb5de372010185e36-1 deleted file mode 100755 index 15e4658e..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/86513e3435adaf7c493dd50eb5de372010185e36-1 +++ /dev/null @@ -1 +0,0 @@ -BZh8rE8P \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/86637b211f4fa0118ccab9ee193c66286126bb5d-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/86637b211f4fa0118ccab9ee193c66286126bb5d-20 deleted file mode 100644 index 32b0426952105e4f64cb7ccfc49a75fbd176e95b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 ocmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%y9Y7e-N+%0DeRZdH?_b diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8695984335fa005895377a8a60000a921d7efd99-10 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8695984335fa005895377a8a60000a921d7efd99-10 deleted file mode 100644 index 4b074d949fbbd9fa4a3e4cdb3a5cf5f3bf7cb6d8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 fcmZQk@|B28Y&o&_G6MqxBLfqVl*p4ed?5e;Qs)Kp diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/86baa53eb98a9a342b0d5b79dfa5c58aa9c1b05e-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/86baa53eb98a9a342b0d5b79dfa5c58aa9c1b05e-16 deleted file mode 100644 index ef5234f697fda64b2d58c1fe2ef3cc7b8093464a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 qcmZQk@|Ey)U|?|4^5tPrXB1#yU&Y}g{f%)r3Nz`#`LlmY-=6$OU? 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/87caf7737ebb025ec2d908224818ceb2bc76b658-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/87caf7737ebb025ec2d908224818ceb2bc76b658-28 deleted file mode 100644 index 10431de1f82c962d06f66a6447abc75de9fc48a4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 156 zcmZQk@|DO-Y&o%a|NB@5Mh0F628NRV{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-Kx!0P`02Ww#hawWh5HDD=;JO%~@QzoEE4K+Zc4t%NB{shmjpWi diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8d70b7de160bbef22ab46f798d687a69dbda772c-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8d70b7de160bbef22ab46f798d687a69dbda772c-5 deleted file mode 100644 index cfba1be2..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/8d70b7de160bbef22ab46f798d687a69dbda772c-5 +++ /dev/null @@ -1 +0,0 @@ -"MbT \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8e533f8a1e58710d99d6b7d39af7034961aa4fbe-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8e533f8a1e58710d99d6b7d39af7034961aa4fbe-5 deleted file mode 100755 index 049f5296..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/8e533f8a1e58710d99d6b7d39af7034961aa4fbe-5 +++ /dev/null @@ -1 +0,0 @@ -"M@" \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8f0d2862c49eebbcd473a38c8fa1e76288f47127-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8f0d2862c49eebbcd473a38c8fa1e76288f47127-26 deleted file mode 100644 index c7dd425011b7d2e0682ae8c1d8f1f623386d2e41..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%k-M C_6rCA diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8f4788d30edd22ebcfef0e52bbf9e8c3d1e8d7e9-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8f4788d30edd22ebcfef0e52bbf9e8c3d1e8d7e9-27 deleted file mode 100644 index 4cc2e6ed233a656b59cfe16c7110de337a5fd63c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 
105 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`zVfdc;x4K0lfER8HIO-&8V efdUL*b^nny$}%uC*n@HRx7?>Ctn;9FLmFA^Z6r?7Xqyhk%jufo` diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/8f7a47710904981ffaa1fefa21fa95fd2d818487-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/8f7a47710904981ffaa1fefa21fa95fd2d818487-7 deleted file mode 100644 index 107b0ae8668d3187ece858002aac4a35ecf8ddfd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 dcmZQk@|DO-Y&o%a|NB@5Mg|rJAjn9}0{~cN2CD!7 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9.bz2 deleted file mode 100755 index 3ce6c4456d67abdd27062e34b6a1635fe44f4db1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 ycmZ>Y%CIzaj8qGbWG}V6&cMLH$Y9@~z+lM3aO6lMW;R diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/90a227d3beab730ed6eecd63657f5406beccabdf-12 b/vendor/github.com/pierrec/lz4/fuzz/corpus/90a227d3beab730ed6eecd63657f5406beccabdf-12 deleted file mode 100644 index cea8af6ba3dc7cc2963cbeb8caf14a7ad4d20095..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39 gcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_p^dbTW?t0K()4cmMzZ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/92197169aded0d5d0407e3925959e922257a101d-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/92197169aded0d5d0407e3925959e922257a101d-28 deleted file mode 100644 index 82153779a3afb961baf829b5623fb323ec65810e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~K5sB=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKX9_c0tJ*n!XTNN|L^zj1tLbEB8LC}!6q{SbxDBDuK^hVG2axTT%ss7 lEhja(L@zkC1gsy~Ng$K$5hnj{U|=w+0Xh+6GLVqS0{~>oHCq4x diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/924e17974cd194fa756d23394676d37cc3641f64-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/924e17974cd194fa756d23394676d37cc3641f64-17 deleted file mode 100644 index 
4cbe3bede9963229670a1324c9a869ffc1ec4419..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 dcmZQk@|Ey#P+GZn|N8=^=qn6BzycCt003lu2HpSw diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/92a785b5ea93d36e27029e281e9a34377d81ce55-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/92a785b5ea93d36e27029e281e9a34377d81ce55-5 deleted file mode 100755 index 44049efb..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/92a785b5ea93d36e27029e281e9a34377d81ce55-5 +++ /dev/null @@ -1 +0,0 @@ -"Mref \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/92d41e4fca52311e848fac274144b6293d9260f7-34 b/vendor/github.com/pierrec/lz4/fuzz/corpus/92d41e4fca52311e848fac274144b6293d9260f7-34 deleted file mode 100644 index d2c123a2e6ace32d9a5540b8e988c37352445f58..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65 zcmZQk@|CDdY&o%a|NB@5#_d`R3=K6Bd0>G}e~rw|O-)VBP0b8V&5SH84UJ6=4RkFH R%qY$}lu^j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?p@9Mb(bx!F diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9363b81db6b35e8beebcc32d560f786472829bd8-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9363b81db6b35e8beebcc32d560f786472829bd8-21 deleted file mode 100644 index 76c0eb4055cb2242bcf948bed5f0d35d550ac165..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0O9h6V{kLrWtAOCt+QQ&R(T3qvDQ240|W4YE2}pnwuc R7%cPue*;JXm;w_Lc>qKQ8fyRm diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/948b1ce043c82d0cfbaa910b6989a1b35a19b8ae-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/948b1ce043c82d0cfbaa910b6989a1b35a19b8ae-16 deleted file mode 100644 index 8ee35db410ce6e2c7ad9fe71e14a57d52f2384e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 mcmZQk@|DO-Y&o%a|NB@5Mh0F628R0o|A8o-osr?3#7Y3cFAQ}6 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9505b43fcbc3139441e35bdaaec138e28af076f6-25 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/9505b43fcbc3139441e35bdaaec138e28af076f6-25 deleted file mode 100644 index f33bdb30bb4f7b7977781e6abc11e1f6bf080944..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 91 hcmZQk@|Ey#P+GZn|N8=^=qn6Bz(To@uY^x#2>>y{9T5Nk diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/951bb02c199adb52e9e300e9fc070bf55980b910-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/951bb02c199adb52e9e300e9fc070bf55980b910-14 deleted file mode 100644 index 812adc7c3481e09fb921605e4d9dace8a04b813f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25 dcmZQk@|Ey)U|?|4^5tPrXJlhwVE7LLasWC@2PFUi diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/955404fe3f375361f5c3be1dbcd28eb9a28f06e4-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/955404fe3f375361f5c3be1dbcd28eb9a28f06e4-13 deleted file mode 100644 index 6b83abdb76df271b5d57995049075336eb99c289..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 dcmZQk@|Ey#P+GZn|N8=^=qn6B(BP!S003CD2KN8} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/955c823909722e2693dd7cea3eadc17833dddf86-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/955c823909722e2693dd7cea3eadc17833dddf86-24 deleted file mode 100644 index 3dae9dc2554cc2249cb468937cbe84f41ddab9f0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_q=5{~H(>B=VRT6c`zV85kNEK>UUpurO3k7AT+u LQVm2<8B_%Trpg>Q diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/95ca8da5556065f33b46c2c8186c2f1cebb1b5da-29 b/vendor/github.com/pierrec/lz4/fuzz/corpus/95ca8da5556065f33b46c2c8186c2f1cebb1b5da-29 deleted file mode 100644 index c95c989da8be80e3422b66b513a653bce411a994..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 112 zcmZQk@|DO-Y&o%a|NB@5Muu1h28RD&Ad$z!puor=%)rnfVQ6S+WMFAzVQFe=U=CEl 
uz`zR>u7PWG0E;y!frS76XJDxL|G$BO!3anJCI0_^&kHn}3CNUSr~v@SsUhtE diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/967e50c6c1bc99aa5e7fa07c2de14564f52b0fd3-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/967e50c6c1bc99aa5e7fa07c2de14564f52b0fd3-20 deleted file mode 100644 index 9bca31c7c2305554a06102f7b3e4d7440e9ba004..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 28 jcmZQk@|Ey)U|?|4^5tPrXXIpHV2D4RY}6vb%+8 KN*f?LpxOX6SsFwD diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/98d40a50ee58c05727777e242ecbc0d4e214f7fe-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/98d40a50ee58c05727777e242ecbc0d4e214f7fe-21 deleted file mode 100644 index dff3de3533976b04786a2cb40d807de90923d98d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 ncmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%y9Y7e=q<5e+LXb diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9915e9bb007bc2c1f3d346123933923279f0dec1-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9915e9bb007bc2c1f3d346123933923279f0dec1-27 deleted file mode 100644 index d39f6c7993c4eb5a43551aaf1d800ce3d68f07e7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%Y$}lu^j8qGboRAj6z#t~ykZGill30?ckeOGO2mph82=xE} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/995d50f1cb750cbf038246d6cb0cf8db11d7e60e-33 b/vendor/github.com/pierrec/lz4/fuzz/corpus/995d50f1cb750cbf038246d6cb0cf8db11d7e60e-33 deleted file mode 100644 index 9d5311e86ee3bb8d3b288c026f848dfcee69c422..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_mRBJRm8N$Heg8kwF;91PZ1wG}Qe6-@w2CCM5D? 
Ok%Yl|@<5spdh!6(zZBE} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/99cfa74a1fea5d16168dd9efc720425b85e95eb7-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/99cfa74a1fea5d16168dd9efc720425b85e95eb7-15 deleted file mode 100644 index acd20c4577e786890e88501f4c7ba950926dde91..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^$iOJ<4gkYk3&8*Y diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9a552bab72f174ede3b9bdb7a663c963fd1463d3-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9a552bab72f174ede3b9bdb7a663c963fd1463d3-16 deleted file mode 100644 index 657ce91e0d0754c21d2c31586380da1882ba1adb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^$Z$?#B>=`R4D|p2 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9a5ab6c72a445b3b27129004d2a1a417cd4d8440-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9a5ab6c72a445b3b27129004d2a1a417cd4d8440-26 deleted file mode 100644 index f0c11e54da5c54ca37428a47fe3c3fa7624b5332..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_ty0{~H(?B=VRT6c`zV85kNE{x<;08lW%(1DKG= QlLZPWfm8z#R0dT60H|afJpcdz diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9aa3050cb38a6ad276cb5e5ca0c4776d92cb7b0f-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9aa3050cb38a6ad276cb5e5ca0c4776d92cb7b0f-1 deleted file mode 100755 index 9e58f1c4..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/9aa3050cb38a6ad276cb5e5ca0c4776d92cb7b0f-1 +++ /dev/null @@ -1 +0,0 @@ -BZh31AY&SY \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9be44693435bc6c51980f30418bcc690d8c25fe7-6 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9be44693435bc6c51980f30418bcc690d8c25fe7-6 deleted file mode 100755 index 45ec57db..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/9be44693435bc6c51980f30418bcc690d8c25fe7-6 +++ 
/dev/null @@ -1 +0,0 @@ -"MrSf \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9c0420bf00f888487d543f42fc48b407c65d4717-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9c0420bf00f888487d543f42fc48b407c65d4717-17 deleted file mode 100644 index 635438d986ba0853cec981d2cad9188fda50cc7e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 95 zcmZQk@|B28Y&o&_G6MqxBM>t%uz-am&I@Y6#UMf$s`5Y@_rH$?YGh(yXi#F{1(G%Y QB_iwY+vv7u>`#;c00|Bk`2YX_ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/9ca2a086f1f08c7dec54d52425bd72f17c11056e-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/9ca2a086f1f08c7dec54d52425bd72f17c11056e-21 deleted file mode 100644 index b52fcb904e7846b16ef63ceb2dcd59428e1f4e72..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37 mcmZQk@|Ey)U|?|4^5tPrXB1*!V2D4RY}6vb%s^|LQ;g|L|hUKKsA`6YlAD zc`I*Yq>cap03SdA000000Pe>@o%ZE+&|9wQdZK`RbC%m4%MG;|_Z&S3uFrNr00Yvf zPytkx3ZMko01IFM1MbBD=%4@+NF%Ic3;?8kv>bPxFFi;`@6aWAeqKW_jHKKq5loTBl0MJoVC{aKaD`r6|I>_9? zM-U21B2p*-P%g%l=nSX;C@ZZ12HN{%DIf&^prud%0HT0U0)POJ0HHt>000F508@wo zs-*y+P(lKPiUD5i0011Q3ZqJiAt#e00)2o9%b#m03!C@cI|cX z^ewwOgP}kG00jU5PyhfF00000KIUddWzUy*zYj(XS zo#2k@&yew7MfZI4zGbj5%K6?ETfXnR=e?&s%=R5=*Q$L!xO%M*Oy+NSp7(1bO*@-! 
zeNVjg!`Ha__?%a};t#KV*Ig4ehLd#HZe5%WSj~3RyPd!h+Oykk-8A;c00%OJ&5)RG ztzZTyDyoWr1qzf8ce-*`#_b&`~O?by^sztE~1O3vuo5Rh`LsGW40~*GGGf zPkZB|RQR@KQ$s-R(QfYVxO>gByB(iRb4rwhNl&iel{AKY-g)hu*{4F9(z?@gEw^V} z*k>>R001Z(;r99-PrD}1S6tohk5Or+g!>WWuW9l?;k?@!?(**4fMbO2WQ3SjEY|`r1R~S-8op4hizH5 zZJVK|2rd%0M72}MN_ly03Lu262{qixapuq!IDEzDy6p7ni5vE zvAVms$OAwqHg4?L0noVH4FKD=8s*Sk7t5?i9^UP%z3b8Dbba^F2hWYOH{EvTcYP0f zCNbRm>+Apk0000!$C&xIS@S)4>^Ugzy;~{NwKG>SO?}u?-^ zYoHD4qPvZGO1tfYRDJWhUER|;dA-{3yPL<#rBr|b$7zpvoxMJNA5aw?>zhca6KxH7 zeeN~f01B<{6~4WFd+yyc)y>^ERz2)Q8e0majit0`DFGT1=yEP;Q&{Xck1STz7;T2u z0ZJMhd8O}XJH_ForBPGT#_v6DbiBKCLidMCgSG(i-E2rb0N1CX>GlKR@1~Ri3i+WN znwxeQcJ1!PQ?Wj}`p3q7?Fqc-TjRM6 zn5%=UyEmMpdvxS-dE2MDX7*z@x!-N?yRB{&w(fn`nmcoMZEyep9RL&n4u-0(==*Bv zZvlY_pIQgEv|#k_Ub-!sYV3F3-F){w(DUYj<|MOax&;6R<#2euGN*3La1^9T%+}iO z?G7oq_keq?2iqmq54P*O0YFdy2J_0R%t|Fb=eN7P+2?uNw>fR*#qV`Zb#{ty4$bDO zS@+O4zTdH@9gy?Rx_gJm2Ww{7>!eaL;wXwQ-&hMT*9zW5v9^Y43I zxLc>YyAMoK?X2w8RUHV{rRmw7cYOdf3(p&a*_F1J(>865RH?AlX}fjRBv5yydcX(UP7O~WKtd4^K!6a_AsGbIYMG=B28i~j zr9DhE!XXGkAxx7fFsGAIpfmshpa1{@Or)YoH8jK$pQzf1^*u(Or?pHV%|J8&003#A zlSqjP1P~^Gig~FX)FG#o9-sgK001FKh#(1&nv7G_#Gaa|_>4k%O{xHB0NR0|2{e?@ zff^7H6G1Tq$iyb26V%C}pc-kDLBF}pS`3P@FSf2<+oPd3F>AFmjH-oyU? z{y+I=^?y758UHW)sj|&?D}vEEOcRL=vktPjM}MK`gq;*aAql2QoJcH~R<$w6NG@HM zgj%qzXo#$!+Hi<9B7#e3Ku}R?gaJi05;YLUgd$lJx-gCxa!LJe)1HckAx#OPH`JVN zOC?#PaX6XCty@{XcW#&=NwVo#Q&mwX`#YDh@#M-VB6EUtDnQ}1jOcgHj#O&QGO9?? 
zwM`PZTrA;CO;}SEPFzVKvJ<5!L0yWlt1FZsh*V+3%!XAB4mDdYU6h@IP0k^}^e)OO z#w+XX=Uz2`@|{>D<4mLq|ik~MlnsaK}nRN ztXKE5(&&Pw3cF4RHY+j|l`#Q1Q-3=A|7F=fohPEgITY0nR79e`oPXDk`uuOF*Emle z!0I9rlHmEh??WiZBh#0c$1-Y&C?F!D2`Gq1E>|F`A_{e3ooDp~q9_XySc4mi zBk%wEwA_#5eMD+mzeeie^}i25AI?;A6?lPN%Aol|A@eGOxKt0_B7jf=_xJPl@9FP& z^xvPRXI`fr2s^CfM9)9==bhT$;xChVOPIg^cKP*uV1F;3e5xGsk<5O+om#YDy^zN`Bpq>DhY`{iuC9>lniBQa-KuzxsB+k80|_m$A;!`P)0HC=cO7 z;S4?KORtHZ7!?EI*v+#@sq4lTvM8aXX?spW#P!1p5`m#8Qkn*eQX-P7l7SEvasO&b zio@S)j;$V)(=i|@cSv%;(oPW9wF<~69|&tHuwt4q$ zCr3M3LtE3|2itQCC{aJerPrwPI>K$X@f{Jp5~k=3=V|Zmf3v@auj5F(Etxj94!`uJ zj&nG_*|+L_n?JG;Br%_i)*ab6&ZFl-EIO~Kh{QuV)ux{<=?3}b_xGNexnJG#@5Dhq zv%pg4_CR5}^AOz4qY%*S%-@EL7Bd6t_qn&kVO%Y98YDN&V{$So4qCwx!aqFR6=W`< zZ{Aafk{um05dTm}W*nch$lsqa_YV2!_Y=pyp}Z~LY3((6AM8(`_iD7T-kEv-2^)Os zUl4`EF>*e5&&NIuV-u+NcTY?T4~h-GmR0aRU=EEk7J`8e9?0+K@^`5nvd88xh#zB5 zwg&|~JJp{M) zDs>sZY#iIU%+z3fGcRGb@x!-P>GA7}zMBAbo$$x9L&u0GcvX$5_dU{!b6IS2=Y2;G zja!)1cIw|XP%aV4N6p-hY2%W%Rwz`scn6>VF`!LItls8f2ucVJPV`#ZRCet$F24pT z?F%f$Ritbu6)VaLrt40hNO~VS^)CvQv55C8(8UX^L)rh#!D2l0gR&XV(PHX{KVO66 z$IadGxnY+513F6HzIwrd7Ph^uxLqi1V-FnH#;gT{qeY<6hb-=G>l{$TBl% zBSRa$*I6DgvtN1UlFc#t+Oxjf&tFM_6mUU7W9fS82fgxKC``yfOCD>{rf+XNz43~P z>N!fVb-Ha!vTD|t-gq(&#J(^j4$b`uiKg?hPl@iJ_Um4oqk#-uh3H3bJTnf+LvTXU zf%)kR#nV|>mZx^_2hG&tsv)4_6M9TWT}529Ewb_?{N^{tQYX{663pDl>9$2s!(-n5 zW5Iuh_&sY?J-!(7?RH>%oEs@v6Lf{+$EP{Cf5HpUDs{r!(1BbN%wnezGh^vB$;=~J z&S*WtIX=Fg=Dv4$EeJ9lX$J>0I={n1tm74u@SG+pb@1;9M11^Dbq5~b51)R6Z|UKl zO}_K#qwC*c?c6OUO>KS@3{u;xtq`R0Uepp*5XJBhn_cijH6NI91{8j-8J-)TCJY<% zq_6akUdp=r&U1xfJIMyzhYu1MLrlB7QXh>!FI&OBXI1j8UhdDAU&01i0FA*4ZTh!w zXo2+5cE48br4Y-N0h`85itU%W(G!d|fr_07>HJgkEw?i(@xATBAx>K|S^ST)j4d~b z-hnwff}-DV?ePK_K^6==rw@(7cPHNL=RTOnH5$YlQU1S|yq2jgeH7@d1P>FYo3tMC zrQ+-hAq9zRDoNn5PxBHvctKTnm}`@*XBZj8%+5&fI6P9vIiZXGva=Lc`kXFZ#Fn*a zZ9Z5zKe0iR^pXzEuJ|d}UQ^nKSiAJ!bJs@^6D@pmI_uVk`@N~Mj{-)XBSv*YI72_4 zUhjI-z4UsGoP&s)zgdZVkE+v844m`6sw{er>)^)zc^)lxMIH_yld!J3Vbt``%RnvB 
zzVWQ!NZGeD#%20`arqS^ckF}3&qgbklgSXHxS5riRp_Y)&`UDAAu=lUxUR%?wRjR3 zj*qC0^B)}(bFj00pCz+Nr6Bh*%s?gS-H&(g7mmAWAWY}I#{-Gk?t672`(dn}xXT{nhZhH=KL!i<5VEG0D zG_cG-QYzrt%pCm|%f=}G@y2|vEU%2wywlq*!F5?aK%NJFj13~V`)J<~R6)JeZlZHo zymQ?MI+WN#4K!bMH7L$HT8L(CT_YYO)Csdfc4RheAQmJ&H7qlvyP>}6w{mY^bfXb< zqhen1vc{bg$#17sK*v9BwM%!PNw+v-Q3CG4H>ma8MzCeHv%>B%r-D5NJUy8luuaGRX#v85KdUpQXhX&n#bi@ttOBqs z0(y4&0Xt3Pn=!8eg7Q<;kEw(`2PR> zet$n-sL&u1J0NHH0s4JB^P`*dPnv1?*ZmoF-~D@N>)fCCdmpwyK8z;1eLkQ0oc_C? z@BTlIc>D*i-|hSU$MQXXUiV_(1DL+hWnSHw@>*X^^P+c!8bpTJc*wa?_hKcrS7 z5z|+(oc@b3io`Bq=z`Z>sj$s%ehfJ1Jee-_I?(lY`h2*%Cwp3LVeAbN>3@G)iwW3P zUtaYpAd6A8t=Sf!N)U;m!c*n~lTtJpBcF}LKi&Cp=k)bH=g;)J{Q8#%oc_%8!JFJZ zQkk@H~mK~{ompJvIze2-r8sKGp|F_>H25ie|`P& zvWly3%V}{8ROg%iDZsG=WZ#cF9X}b};Q{(*j1T;Om&fuwPv8F^>G}Tuw`uIoA5V`z z%|AVHpG=Z}d;bo959P=FzdwKH)9d#A`Z{|){WLM1l#l*NUg&=S8K*H^blap>tC>+9I zQ#A+oOa4%5r_6jkwEw;iv&_BZKD)sG4!P(3lh-x_Uo$hN_;p2|Qn36Oea+id;`Z&a zf-F!c^6VSK+NBqllO4VfRKJ*6z!B*&mq>6===k>Z+Fa`8*tRqdoP`#Gs$UJGB4UM2H^n*e*|3uhke`q3F20j2H9#^_kqwEa;+k z?e?;R93%W1&?ZOGnq2j(%WV(za(8=(FEb+xy?YOicYgLiJXBAH5Rs#sL4m%YZtPch-3@P6)tir~4 z(=oHbB3ykL-m~(_KrYHsgvXxea(x4(LGOUdPpx^E^l?{asYxJE94&3HFyXFi+gs=F zKOe@w7C7aRw7|)T*R?pg3lHk}<^1%j68J~QzgTLQeeM@RLopzsOw2+=7G{jHBl9EC zX(UoX$oZSY_jdY0ZdgHm6iG8liR;?S+xFf>yx?ImWWPL&zjC*<)^pqTuKhZCxt-y{ zst&-2`=F2`{~y}ltbV6gC-Rr*^f%A+r?~!IVOVBU69GTc*tvcj++>IIHhX({==~qB z&*S&^t{ix80fjCUVSxlfJGO39Qu?R$CY)3na#6Cp$v9<0BmCCU=Gz^sCj9`@8H zdtsKy+Y}ua*PiL$YlmmDKhE#Cc4}IhFrP*Q_-joM>iXELdv(!SY*m_{Xd}BT``h8l|_P6qpHO>!U(;M`vRRS{#BTFa)lHW)EuebjF=UuP+U-JEa z;m2=>+x(yI>FjCzy65MuzpXCAF5yv7arFd?|3BxG|I-m)I=6v8m)86j>)VYFC)1lr@84n$o$24V?eDQBmgf<8XL}Ma{+0T(+cLTSiThpbTPDK8 zWAy%kcmBbQILRLkg|7Fr3fF!-VR!FP7Dd63A0rP0&hOUFej)k))c;TA{@edmV#Dw6 z@6ltTOO9=`>|_Ak-RtQo*){Tw%?0bj4@X(A?v{iD0j<2U*13dK0ECC@A?7vZ{hzR`)(fH zd$SMX9)6#pwD84>JlZq#N`bia?TckF5xnh+o{qG)Y%P%e=_mT=rzh|)R-_?_A6<@i z%}5Z3bZvzKahC^>C{rZykjV6f<^-WA6GU=BkkADKsc?>A^pw<_gzF2aH6OQ31>7m= zb)6C%!TQFh{*rMasC}R6{D6S3oo`qcLTcyR51p)J=Sl8-Hew$q{H4qY4gL+)?LSY` 
zeuy_mXI)9Cjg~#)Gyel0-fjswS4!Tuh6LPHJ>zADuG9(L%sxnim%9qO7RVK9q3}2u&;~9G&IY zl^b}_fz;+O+h%u6uIO(Mw+|=`;a&S+IyT9Hg_1wR7-6%MC)Ot@5(T8#bCn(umL8;~ z@nZ&;kV;2c;pC)jTq8=ydSN6d4JfJ07&xPB!@dvU)a~3G?y&+1lsJ;9}c9 zz<@EB^fUgZ-|l}vubpQ6%SV#5y z_V>O!_5A$w>UJ9bOsKO4?jDR(WqcUZMwLY>)TL&?&5!8orn0Vz zN?{sOkB8e0pJWHOgT~*P6JTpC`~`)-_xI5MXZimh;k|S;zn?-I^#0D*o=-fbXZ)$m@wekrvNPv__F{Q6&fa<}lhzk&LHJBC!}_~+@bA9&N$ zpURH!P`cyb<3SNA6NYDGsFrmIanOl|c^)u&iA+~?uBlB_YGOGZY(_O| z%*g*dG-7DyE|&5jYp6i$GO?yJd8!#f-h)ABUNy$NTC6n7t{h&LRJ7Dz6}+p{E)nG^ z_f5>4NY@cZ7rRJx7mL~U*umw>f|tFD@AGo(~jeLdiQrH$k(QhYFh!7^sdarg)FqAVo9=GGXnF7(n6t{ZGc`Ob53J(=)>tr+D)^_WtyQrX#oG;ob-!X7(q)Gn>zT#KJT*#c zJz^zb?a{aao!_&107GC{V&PY zt?f8g`+?np2t*Tv`nJkKJSJK;dwHJrF_GJ2LXd%lF&c4snQ!llZBKNHN%ZD5qd3(-#H0&t)5u*b>h;dE8#tmoF*7HZ=*O!MQDRq#mWM6(> zDGqAy=JeW%`661BR&NiO7$R_}H8#hDDF%ilSK}Pw&Aq5Kx-P@CoU~r0s*1FNB!~=* z480TOBL5p!l3elZ@U^m`RkcZ1pT5cKt&v7Q|3QDt_BO(mmxH(U zn|ZWO=IyFgYXSC%&bdZ^}|=PNDEHTJd??-QxbZ z^l{;M=4YGU701fH}KGCwO}G`u2x_w(iPD{#(^mMATAS(o~lgZJ@uxA>lcH0z*)puHdArwIVq94y#`s4C{WWbwn_4*;o*!-U8dDn6Ez8%-O*&Ne(I4>-) zd8OA|``y*kqJ>N~InqZ4DS9YE9kiFaZ zJABi`h1@|hm+Y%k5u&?S4NLU9iwGqRES7o)31#H@bpbfiCD9JtzNuk3vsk4~o^L~F zWEr-ISLK6PA^`u17jHhQ-(7;9EIa<++G_ocRcqQCUOK?aBmy9`5-z6QEJO8UHi_8; z=@^r36Pi_)AUo>rjh!0XXpB1NA%JoqqAj8azD$=AM2cP@((ThuMtQUr@Qxp{jQDVt~oF=H|jABkV+(nuN7Bvy&TK#(N8-%db+s$Zd^HxmeTk4pv2dV z)s2#acH4K5p3>cJ-7YMOBz#ZrW8b}f2#DGn@Usep@}HSy_Y!@mC)xHOBIUhq&%d>{ zhzjo!e^BL#2^6y1p#hPC4f*!r!GwcKIj8RrR^4f>i1gEtGhi+;&x}5g)-R6{F~;+g z9eEewRFrko{c@>CG_E^-yWVDRU7icFw^2~Ox>ff$z)(0l?=emY>~9X?G*VLn1_Qp~ z_fa^J7!6B=SA_uan8!Ti+PQkb)~`@arw%al!JLPRD($Rim%+QXCa` z!*VwK_v^X*^IrUo5Dh7%8S3#jtPTXSfTl+t+d=Pt3_hWcXiy}K9Odt*95!+&@LVUX z?7SE8OIacjoA}rx>oI>Y&84E(95V!F9AJ#y3L@uP%e}NluC$%4ZqMJ^`}l_XdVR)<2$hRXN2&q6fb@| z?K_EyviiI;Y3b)f(_@unX7t|5+7{UmM`eo}3=v{&ByXv1qeYytq9b)%FABs8yI&l2 z-DUa=#9X}~%eGkVo<(<^`|S?i`!=ivRIFJd9hS(%f6_A=Ino{x|A`cFw5sv+(QXjB zR&K|}Vg~0pe3J>tN22BdwGsGa=jInkXD+kX1J3umh%Sp$=06=y`d@R^-8L;tW;|A| 
zuGKe!?I@m*2$WC=*dh!UA~hF_#IR)4Y72Mu)dv$-IndO}b*`}aPhK$5#z%hfpgwjhply|EC|ar-r9HHY&Y+CmzJcE+%n`ie8(*rA~v~LYHV%88yl=+46hkAsY2P_Yi8xBA5Mkq4eBp9sa}YJ z#CiXvcP9|l)?u-K@TKi1wb@v9>fW#uNo=N|wrBgu-`~8pBE!)RN6Scd%G(7!Q0&8m zF(#9D@@wkx)@{#CP_TFMeq+7w_Und+EpQs^YYzTE{>**w@hdXXGEcTugsr1%L_gfq zU9W4==SU5kP4JuBEuf(*TL;n69@3I7Xfq_(?;3k%{V+b2vS+qgBx(g}Wy&zX$CLfM z6AK82_mtR<*}$U`>l^hmNfRr_ad;l1UaGwti|f{}geF;H@F>T&cazcAi!5#JhD*I& z@rJTw1t4~E#`}rn>;EJAJL<;xL;o-S3w^Jt`*6NRy=GEii16CqM17y4cKC!_Vuht4Qi7))&$5qjJ&y_l=-S@K=fa-BBgXa8j;p@ItV?3C{h6k#3WY>5Cz7?0#b$Bxf+OJ* z1G3k`7&1>u$pপ*w{16sRxMwP7>6;%--uBRKv*_P!$`DBgN+)gaCYt8!wB7iv zf*{$v8{_|37}5`*nqaZ^g4zMWw6Yo8GcsdqoE@GUG@-+i38uU0(uxL%bWr;u4S246 zIUtZOK|F*U{}L{I+8yCsqAhJ_kl<;7dqw$d)TyuGh9v6mt=}E(m4{=(OyA!RyE)Y= zqpD&o3vNRwr0Y6RUCRbGql$TdzXE=cIa8GcZkZQxL{f%;LFg@~^zm7{+=>pBsSHZA za|{Di?mu{Y%jGbP=5t2H(wX|S}KQ`Z^sZI~mjgN(h zg8hQMbIF*_xZX}{5*Kkpj>fMya}W-nSPy0>uT0$lSvabJ^*XrwiXLOXn!mZZ;dQIp zrKQ^A`ypm?jN;rwA>ReuUhNn$NL9BA{;968!i!j%?NnZ1<7}%Xpv!Hy7*zXDi{k7y zeS7g#hBl@^7Sp~)q_WnhkZ`1=PR~v<4>YkXn^`?Gm)0|JU1JYhZgXroS6gUFHj>O1 zO;z~vL--YBT6efT)F{&;gfhC%+Zs9acRUH}X{+E{o?}~22yrEchuO-&&c*B zr-vs6GJ>|Dp}J3llX(#r#(qxq<-WIr@4bh*$3zaqxPI4VvOiOgj@PVeJ3jIEfIfB8 zd~t{Q`jMwr(^~rRGf30AIm~$H;%kI&^|2oJMm9ZtTunrK=;}x^SH5wd%)TakF;2li zX!s6z{x#~m!oDMWj`y5o!(4RBr7=-Y0vLlC<*A_%yvKL*yS4OW?(LTiR4+|_PA)o$ zXy%#ninn{yOHY@FIEqX`qsb6(O#_x3rPIoM2dWv~H?BPqrF2i2Sqaq= zB3LzKOSwjhIxnvq7nu(+De9yz3%QPUP8aRcmyXwM*>lu;F8;DfKJ*gI$<6>d# zg`DS;K(X&oC(-FBLPYqHEfds@8rdIv=JvYdr=mSUj`NVCM%%0wx?4|_!T9R$wzXXY zN+)awF8U)Ioc{=0p4fFt!D1Vf61_RM(@&LC2f2@H(8A>|D^MecNd6_HPuF0Jj`bb* z%l|*8&ftA9WysT!+UbSlL^)+?AYs*;;`m(KCZYPh4S!%XcENJvmyL-l*%Pu7c2g23(=&0T!9UXS|BLh1`7!wfhMo_Bnbk# z>;2xjng^tfZ6H$9Y!E^tu4?XEaTeJH1c@nWa+{dy4|6!xY}d`+-WkUWaa@h6Awp2h zQfP&(Ne!f9ZMHyT`au~^BGs7@d9o~sIU8`jk@BXG3avB|?f+~Kv$m z{ZpOwnPXjBBjOCTN{@rsnRbKdP z&MR?ztTjvTeXS|qSI?~4?YoHJRo>52dNCDMSWp@&UkV+$^FjU*0UvC{u^ZEzgoZjm z=Eyj~kXe1!na^FS|8Mz!)%lKd^o%4t;0}G@dyx_S4qM|I@s%ZY3@jH22#7Rn`V4q* 
zY)|BX7>OB?SQj_f%nV;z1Yz)73hi{7DKR!I7T4$N@f`g!?tAXFQ#i|3LKhfVa&VIm z3s{-FQK+-BxL@>@5qEGsFYg?x;$HCQ_nTeZO|pRFb?Y$j(bA5P=^*8xcary5Yyw{L&f@%Q2ih69 zQax@y6MyLMRv=3XFA_c}Og>NJVq_l`B2Tgr1dxj;w6&+GRMD{Gkvg!XU}UR z$vDQiV}WpiLlcE3k`>@DOqg%Ech~%o4n7;CttlHD>|fckbVs5-j4*&m$F{^(?T8qM zO%1i_Bt-Oa&X7i**E3tSfD=)hy*XmecAlAxZuES;mx-2}bUGcb5%v$>o2t~ipE=3< zLB;i+VdRsnG!b;S2WVpPO@cCP2VG`dNgv~--QhIrB1785Mmr$+)&&rZfI{ewh$UU% z^v8RAVeUGH@>#)r!wN4f$6vGR21%60T8z(h&{ed$YzA7H{iG+wfehVsj^a-Z^EgbH z@!#=FJ%1sn0(u&A;uwQLY;?BP|p1Zm5XA@Gf5mXj8Il3yS ztO{uBdQlYQnJVI=e6*Gws(vbkEX{M#TDL*{7JXAQFwU`YT-D(gBP^WO_4$j+r=kp0Rb( zKg+qBjCDSr$FC|fKQS?AargK;yEq7@YwuRYk<8l9XVv2@MH#sJhZqJ=3OK^i4q&*i zvezJGrH0DP^`2E&YU=8-iI8kXqbZ58#{Oml5r5P8$1`Y6`d;ca1h{4+y}RA#qACnh zqfE13nR!sq?t=Lg@@|8P%}bk?mh_P6n}6T6xEr6t>m*VB2LqFQ{ano&wK4AJZ`#<9 z_g{AqPs#)!4WWD_Uqx!Atv}2BTK9H>0VEPk(cz{^q+-YrGE#l(W2uEcSlBO zJ8gj_9o6vd!#-6+f;IZ;F^nD4vw8ZhYldy3XZ~%Y$<{c-w?o_mrRVRS2wCZlN3T}< zZQgl)KW%M;P)B8hq1!RA=!qiSc`dzTbM8kh4nFqR@qH!Z`s?!!!3{C(TYjT2vck*> z594|)L=2^Y?@wa_`IM z0G)%vAHy*B#4&wVsLV77*BMDTV?yvEqkv-JN2c`^klUwtTy~nlyBC*`gvnoJnF2%t z$T!-CnG4n@^qdEuFBBdKoIYtWaoe*4ZVH}GK85dR1`hZ9`17Xj7(LNFTz_d}&r~(V zb1Zx`K09|0TQIQf7?F_?r^_WTuC%_tgC@QAhAxonpXtr=LCE{gaSg^*(To>nf`Ag><7Fo#4H_P-6du`XL3>ZTfJEdb~!jj)c_8xs|nnGW4tzvxB&m9aOD z^7_^N-Tb^CAGaAfuUboP@Hmj$J|X{3dfqSYl=7$d>r(PZ;6ugnJYm;2`^O%)uDRy- z>#Q92e7-RpuNmI3#(Y%kSm~J;6`We(1A%ZONymM-Vf^C^5&)hn?OCq;Z#mF!uLHmC zgfxGz81RYUR!69pw=&++Gu&{p86^<@l?m=cKsLoEGgFL<$l)JMogr0ZO^(zSBH*!- z{*e3~a%HLzIfbH)cgE^8G??uzMdLrKB?QBi77j8$_pq%25G(CV$~`E zf=%-=5^!jD$VKk5mSgm|+^!{T1BAFlG6USBPi*QKu6nNT`8)@B%cm5`JN2gSPTgzQ zm<=5;^M7;Hc2?I|U1&RcE_)*J<3@O}>U&|69{nLhq6}Oy3#%c1T-vIjbhnx{h)jgP zX{_BTq40a%o)hb>&m^o}$c-yq;+%SwT>Tm42R(=Aw_4wwjjZ>771*14!!?Lsyy4Vx4${JRyiVVpV7Ffq!efJgOdJQ8{+<-bPU zcRle#&Qkca*bRG!yQwMR=DB|Syat)RVeCgz;TWGh~%>n zu18{re~B9dd%x+mzsI}l&gP)}A$Mo!KiWczw?!ts@!ZEw>>1w#zBda{*w>La;u&vb zV-U5S#_NR>Zt;u1=diod{WA@w^`>Ro3_u3gUN)2c#w>z<%k>|>RhPWLe16ZvLyH<9 
zj|}NgpM98PM_-NgXWu*9q9m%p$Te;jRo$A;H*xrgrRtRvc0&w~r;2rVT@kspa!e1Kx1Xmj554eP*aq z5a>JKv-R_;i%vb@LnuQG^6X>jf%op*B2V38cWDC#%?`kyX4%HutZeiRT~GDt#`0@W z#Sxy|dhnY( zp)F4O=)wlgyk0(WO6Nm+&a_6t7sK&?$igEH3uBAC4*1OgK*rP=C#Fv|5S)%y3bnq? zzt4&A$+7rwp$|z21&greKFb_O_dkB>`O~(QI5VqcBdY$Yucm&DbU#&k2w*pd8mh3d zu9G4!PL)IG>~H)JI7}M^>rK^HPXmCABSYbjn$rfAiJZT3j${~C1N`!=Z&J(AylfuzR`{?YVRb=RdbQ6X=G}n?Ey@ zC@WF=2d8f^Vk6@k>EP0WE=X*3n|J+#J+WT#n{-ia0 zLVu0XSLE9MOExiQqy#oU4yuWO{m^zchKCSIgx2VZIFVkw%*3e_{*Aw`ga}AmACuJ{ zygmc(pN_j>)ZeW^^n}>mP8_asHXlB8Y4oV*z~a3EVZA*(EN^gi*O`bw-P7Ee*01U8 z5Jli%mC%US{& zc}~0n=F$&D4G>PoI>64|-(c{+$nU$6fa05?i!r(ARX=c*#me6QTFd6&L#o*%@ZPV# znX$d;%B>PdL?BtZWZ=*D;jQWV4RLckgq`s!b8DB2`e_De!)L`+_;1AT9r{ze{CI`+ z)$xJR4(pnC$CA7#c#c|&502b=>|wEruGnCQvxs4Mi(!-W$r%Zceqj+|9(#=;SFDTX~YjIKOO!zNAXpa z1ioS%emCwh$@+934AgMS4020{`eupsaVD`VhWNxO@>wkPn^2pHU7%K&Ogv+uE&~~{ z$IjlpSS#3{>ko;6%`mHYGb zZD+iz?^;KyN2&@_#*%4kB&?1rm#%WGHek^iAevH;@t3D5XIX5CF z4c#hZ(K_~~gmm2{5ED1I@}-?-Rp>Wc6=AESZp2FoOIZEqFJGxFNA{bsk*vgDWyt5O zPt&}wUpM<-_Wp0Jx)~4r`P+i<2`y#B?8@&32VQPsWT9^}`g)6QM9{lKhJVu{Z_9pf zP9<9H$^IH_k@{xcWPA4W|6yLqJX%f9 zbLXpO-TyD(ynmAK^}o#^i5>RW5%bG;BPYhFyAX(d1f~PLONyv+NhFb@RhB^EDTTNn zh#N1xRxZ8k_Gm{%C=j<_4kLK=`> zZqCDC0vi1v(Wm~@1U6s~uQq+Gd7xR{JY0D44IYLP3c4dX#bo0-`oDe9pl&hETSZ;k zzNA)`47f}Q3E7yCee5ZFw;Ldfzeln(c{Ar~EfRoZjND|nytKW&z80qOImoAr%V=|= z9ovfUViwprjz@kZoYo zU8`)|YDsbEG&jaCMOeWm&NZk zHtj*btJWfJxpm3XUoY!N$8grVwX`-2zUXX04DFsFhtYV;Z2v}PqvjR7kb@!Xj!E^8 z%T;o}TE-t{=r!1$q8$K4Bq$*X1cb|5)}X%`u}a{2KfZA;a>6@?ea0 z6-L4=^pPOZ_x-c+&awTQf;#vvYs4Se^pv79 z&6%EHekI5l6yC88GEj3@wsQR&(Nkl6vikdgH^{~n*VS)WP}A0PQp9^+m}U01tnPeF zSjtC*N6gjcs_Xl4eMG)A>DJVcJ`gG8472MsPO;1f$hj#Iq7k#3e}B@nN|UPMWM_#* z|8{GuaN)|c5$|6f1gK5e1QTQ91i}Feu9F>9wBha3uy%HA_)IdCY82}9VYh3e6|8{c z@3rJ=o^Nqr=FYG67%FXpPR)RSOQRELm?UEi)I-?c)W`Fewfpjf4)lHTgWxM34`-zs z_iA6Sp^{|U7@VWGyjS7vSirceoL2Ev^1hkQI^8`G5dJEI;A~tuXU7#EuC*8oEUWyS zG&?6T{u?#sc`BwGF4HE))yX0av*SjzNp=@tP22kM^{K<3VIRxyxJ{e}zsMLill7&F 
zK_v&9)G>Rjwe5<_I~~F%jlPclW?Afwhv}aE5xm#+j<<>Wo``4i!zqZ@!EU}p2p}fu z*<_kvoCH3X(PVsky|H;@CJ6X*sC4`%X{%z|t+v>*x2w@#706&=kN6j%d zAo-!8-tV^`lgxM`OFQM<@)c}~1?DyKj3{h{&v%z8Tc(?Zsp^#8o=FaoDOixr^usLx zW`RkpFhGJL20|zN=H`jbYh{c7J^#=wAlouMpGov}V`5_J){E$~kEQbU5M}4~W$cK| zwXz&-=XbsGL^@KgrkKBF-BFWAXGR{5i@dAeGX?|^dczA{q8v5%x@V(4u+2T?R3_MH zvDw1z|FNFErbqVE4@fE=jQ2g|ifdeV|4XJ3Gncl_QBwI~yP?(Dg5g#K6Ot48S#UKd zdSpv}a>HFkzTOT9?C9Bun&Y2YGrCukl39=F+u4zwVy?<9X3&G;R#J=;{E#3zJ!W#C z_HF{J!}2{wymbgCt|y{T>mKaezt*HvoErR+6o|WTtGrC1e}#o*qY-95rT?pP$MzqN zy2i%Iw$5H#=4R<@;_R^wLbWd1<25;2w55PsT!vzfb6 z+Ab$QHzRb$D2eGcF{||&H7yGB(3!0Sj+UO!Kfoaj*lZigPV6cu*2!Wp*VoF_Bpd7Z z##!t6m3^Ho``5M}y;D^)^znl?aeSNMgS=HX=A-sMvyJ40?RMQ^OaE_5zcGdxEo+&y z7xp}Gh>fY2>r1~sj{O5y?~Sv~TZX%FvobroH$bMCduC9qX=5$cK?}d%*gN#aG!ja~pfdOKta^JkDW2w937oPpUP}j`_m2-Y3@{96^w$InH?)9pPg% z{ZcYQDakLcB@f}EFAY8aqS5&^rP>39j4_K;y^wd*m_$pLuz87jt>Gf<&<#ENH5DeW z;l(*>W^KZaFZ<>9eEdZywFy@nRz|8zQqQLm0d#xyrlsk6aH1)lA+MWN(^a*cD9}%ljQ=5B(p*#T#5n|);s;5(XZkU ztwQhZy;t{5f`34dNO{FjBL$%|LK)E#<>}Svx51&DeD2mW*~}dsjWJ{R~U=COI+?ZoL)h^ z)O0(Eim2}sBIi=K8hm7@o8lfHEHiPui_9|#bk~;degV&mS_VCJw%iR72PY=3_rqJx zysM3Q_G&XX)!vVy8>BJ!o(W6aPrSf9E!0e_agT+@8zJILD?UPa&EDP+NCT;$*FtgY zwD$PZSjB1-xJKfDPZ0fe(-L*Ly5qZ08#T032x>ZlXt$jz$@z_t3<+~{eD$vmj&wrv zs#7O(J0n>cC4=eG?1j?Z;I<8DJr;OE`L!St0dhzTkiw=2+A0`kf~s2M#2pDluqQDl z9uW9CiPg7JPgB;U)ZrG|4aSOjCd^1Hoj88caEQRMW`7;a=>$#Z%_ia+)m zB+!bQSxu;Big23s2Xfx0Ck0mN-xYDx;vBNEST(g7?Q!{vfReM28XdT>+J%xJm`$y) z)n3;%0*$EKQ$#V(x+7jzxU$(I{GlIJ@A~JG4mGCs+V#{r@l{}qzF?c}KbKqS1MQ`s z5;IRVIh;m~v&qw_WkD7rKbc}WGH#X3LZbsc)lJeg?6_}yOSG{)-Uc8qX^7WcGmc2+ z;A3mdNv|T%TV1L>{9%f$KHfDfJsV`As)ujev}a;VF35Ka6N&?}1Uc)?yNK4%L52;6 zNOJ^~MD|~$7VI9bntP{5#h4DTJv%OxKC!hmKk2WnU5sBFd+I^*30@n9T_h4BEw|&t zRI>+?PIfaHTF9cIrO-hJVNHjq@UtR-Km!bWO|p$*`Edl1fTv#F`$12nz>%J!@zIR2 zinDve+ND=(sJ|(J)=a++afaFBvqTrpFq(-W1K{r$=>luZ=;6XiRKu7^2QCx#>;($r zD!hma^6jGZFsOmqtRaX`MJu~>M9Kn;y3(uAjBW3E8_mRI7HK|E=an>h)$k{E(|BNU z;n_P8!)}ichS?%|rGQmGGZ$~7C}a0;*C;tT*KR!nB1t0(S7dr%;IJN3mf{G?Ohv}C 
z>LTWrm1w6wG8KhNC|+E8#CzT+N(QxNA|tx}ud#D&&z=zDnE6gLFbc<-9T{DG`sW?` zxZ`cChiTc2zf6F-=v;J1?=xnAv>4aC)K_dXj+@r8hrwJX^M{4lL=t@~cv*@hQLtRd z<~LJ@#JHJ6g-Tg}B%mSLX(M2wk|JG!1dw%=FkuUgN# zqm+l5d}qn%jw=r3%B-4<^v7P!Z(Q5lWYX{=kuV@I)!n!rirL0DA^NGFP(j1iN(PLdir-6B;h=g#U*ca;q3n4!5Z>`x&{l zyb#zU?jCZm!M1^FZyNuQ-tD{1nUovf)(u^aXl%%d!HME+W~N_cV(5qzF4cb*-3>?y z#-dzEIsoB&)6MZkvw~)p9i>4f6n@i|z}sbF1BHGwT?%8-OeV+o)+LZ00?Ujmo$h+^ zvF7!@yEx{A&a$vBW?uK|ouB>pjZSlsgx>Gp)db1&d+puq#}~W`*O02;6qnNhwr1Bo zVeV$>5U#<6=}wjLB)xSr>6G8tt!e^n=09r0$fh*o%!V2=4MU%MFTyfxy2Qt$V~Y

R{mW%Fyrh?fY{b)`Lo29OC?@cZ>YS4}Cec7oj`}@*#yn zQ>iE*2~Jap1{_sr8ZkeB-Sh~Q3;i$T}<#M00;5e(eb~(kC7xDCKOp z{7$driH&)^V}B_&Fnd=7h#x+i)(zT?Fq9urhrZLxwwAqq!#53EVS|}u9|`kD)3X*Y z)3_eiVift5+ftl4D{2U_yKc5GQAjP7BoZ=WSEFv;VH2?y+!kmj6A9cU1*~l&B0gW| zRg3jE8l3e(#u~D@s=>b_>T@~TUqp4HU#A*(p|;yjB{Gd+*kI~q>VBG$?wE)V%ox;f zImP3o()Pbs#Ma)g0k@0lgqv{7akRQ?UKTv7-t6=sjU8cFl1rUa)}tjpSv~M^e2Iu^ zryGZa=I5nw*B#J!^9=r6F>#cu6^LA_@78~pG0nv4FtWty{fGQ8VIu8jtq_{7g%A-I z_aoOFH>8GFM%m0kekX0*r=Of^^q2S%RwmyF?AX#6?p<8MHW_qSVkcIX({`XJ*SdNo zzbrjdo(-Aw;_1|1&A-ozdX|G`>h9kwCHcmc$iWvKLRm@az0f@@!-aPJv_kxqHG>Oh znJ+re67QX6@~|zmefZfVwz4Z<#P^rRE8$ppeY9Y~Av0XO`twzSb1)v6WxL3G&3}P{ zeJPJ*Mo<}<#<3UHGjRmMbLi{MNX1`$3^M0x;}ErkS!AoBl490uZUehK>1IB0y(MQ{ zaFaAVK}1Q!zrmO_x2!SAMYvm)0r$_S4hhFikA9t)+F)0oAG_42oXXY-N$t09=V==(Lx=0hP+9B zo;3Hp#5wA}wX#|ncE8z~nPwxpRPZs9S}yY1U-r}zChAW_4=$}({{+FMp`S+Y#-H+5 z{BBzGmoE;9aE^YoA=Ig=C#ntktm>pDDS-x=g<>18@RxWLecl+HZ_^5Mfj z(lUXehujHo*@pw$lBNHJ=a+ctmaB(6jW8Rynt7TLf7zXskgr#R zEM?LsM;cjbwR7s>XL!D@(6-dp>sm0u?)1w)tTUReb~>%i>ilKi+V#pCgjx1scgtmk z`n{2ptbEOlT&8poN2~bfYAqCAUjwY_L|shpa5Ni_s{RKz+YD{?;$%;_fzgcK2BvyK zK@s;kH^g{F5ULtxvGB1u24zoGxk&Z+^$z{;GSTUVJrrMg`-7ZJ**e-Lke$pBOqe8= z(vhf;f3SMBzlZG~(=(?OZ}9DV{`!IlGkQ1YQX|Xz^dX+G^iTL?FhIZ3{-)X}B;~!pP@WDw8bIkC}3)k{Hl6}&-ZeD*!a|AbIrx$p!$k;j<$di5?QW-Ci zH=7K4Rid7NmJ#)}H#+k+kC8Sd$<#3D@Y|$^yje0qhg(&L3WM7i@_Tbz&bm~$`Jc`1 z*C|_Op1Ag62)yjm#}H>s!-azw1h8M7jKnhzm~qr?%8+Lm$PPHfk6owPwh1AR=$Wg@ z0GJ{9xkTrYas*dQF?0elZ9X8P%d(DktkSKMV`upftmxL_`nIp;5MshiHcS@(do`n% zaENeZs2lOdX(>`h*_aiOU^9Zmh3#=I7}hxoV=PwM(W8m))@hyjBX=8MI1I8Z*+CCi zL4yoY#H}af=hdwF*VTdt={>bPeG4N*F&1^MARoQ3huaNl0!eW=46wg$k>UJ9>_W=2 zL~njjCs(~k5TPc?J<_ZT+J6^+K77zWxef#1(WB|=8ru3z=5CvN+&i*=e&YwT%0~o3 zvlo1Hy&GypBQ7hU!OT7Q$8DrBHU6D62qYQ(oBJK}#UFw_PsoO7>2}Y%s&p_~DdXXD z^GZeG%71|TOmNQ-m*D}bez8vO%IB=*{Z!0@!n5Ce*O=l3@!IPI$7mDu`D>)lyi40_Z3ZYlgo z&RL1ut$vXk5VedpL(=O{7TL&TezEoQl<4<|oxYi`Mcb`yiP7lUj-A2XztObsuuAd& zliM3#QWM>w48Lm}{u~LF+qAQF4F$tlwpZ|mzoSww)mgLjTN+--%r_re&2{(mXkz!- 
zzcL-R>Z&hgU`RVlZ+tC?c1lq&TFtMYO#ZBAGT7Qz3&U6o8dWUPKym7EtPb8K*(w_O zmpwpXbi{Dr!2MI>GTYBOwekhkiN%f(L^A(7p`i_`qVHt)7q49u$dQ5@y zrv7htV#fI18=|%E0G7o5C_4%I=|1^D={aJTS!;TWwUPI?lhhu%j~m$!KT6I1 z)!<0lSL`jsxfwJ4X9bL4|2U5hTD2M4p~k+5urxB_LB)g7dB?q9+x7UN)J8d(Ii;)? z*Di=+7>5@Ha$h-l!YuPY3f1#$(de1X!!dIEm#U7ISa8@Y@sn}WMbi&qtML4jn6v2! zO$`tWS-y-2=ZHfztn~NzMi>6ytbIHfK-jWB6}zz=)fHu6y!aWqmP|gDtKC%Cp}Rt2ADUGrhGa;ul-zoGA=0vg^Q<7*gAw0CA__LJZb&a*hZ5s z_bt_6i(w7p-z-V|!x%>Wqy9|;f zdt{3Zf4PQ_*FQgbkH!;h{jeX7b)=-i1l;%?;`oplN`U%7GC&=)8Q6>w$zSigCt!Jt z@n~A%>*91f&9-B-F_>vaokdD*)KXVO-m|&=bGDxG97{90&LW|u-GrMa{Px-XJ7GQ- zB@44+kK!oV$pwOX(70xh;oJG8_ivIf`)At?b-rA0@m})+n9W)C%2DUm^VsAfs`u-D z#rZbR(!_MwA*z^^@p(R6WJTt(@j|fvsdmiNjK~5^kY1%n~UDHfnlN~r<4TL1GbUv)6$N4Cr}xsM z{Z3g}v^Q=L+_SH~i>1)k=Ep7K0~s$O~k+*lj&Gbv_B54qviABy+L9zHBRIo)gUg08to-F-3M}lM-?t3 z=!GWK*EYW)_;+^sMml1b*BoVbFAreV9hCN1H|0}={QX^n-a7jG-rk!79C4PlE;Ubg zk2zNnn8-g^(Icbd+pUuax=>D<(l}v*H99<0%Q#u|Z})%0pULLOwC{)9nG!v{e<&n` z7!9}lalP>~uXcAmB^>)bd7FeU(lysoGBCjai_cuAb`e%L{7@0@ZuS)k0!);S!>uu7 z*hIr){m@u9eNEi3i4cY8?NzbZPz1UpWa2AT29ZFd0O0%G+K+`6upt|eBQ?io`n!Yp zJYd9oKMwQuZ?ZM9bEcM!N7D;UzJ&B^Y}< zPYdb2*lI`gzZs5-AUGfM>w%|DkXQ5%!vkmNrVoFVpmSfg7b@ z${6L?KS%~+I~5+14BwgEXDJ1F#bN1^4$O%qm%mIHNkwan&+~ZvIAiostPvo_{X0-| zSjFLosgIL_J1_~9az94ROp+ZQ7apN?++L>5a(a(X82$=@WJZfx*}C?Su#>3bQMss!QY)`V^T8>9RP&C_%AWzNHyv`xqcXkv( z`o9BFo})Jnx}uzGeZ`sX%Sa6Ltk@7z5_~Y-X))RK%GDUW7(bU8XfN9cKCe^EQTQF} z9^_c?%kc;1|KjD@>9%q8+Rp*PMyRm({4C}}2ry=*tsS2l(pJFt2Hr|EsmL-a4k5!SB>2)SS@P4UbjnY9Tl$TUJ4|!CzaI9 z4r1>Qr*l0dMsKVJjisKN?k1S*5CaZP>Vzi1Ss{coL6sSK0O&(&`esHjY-yHq|Jge; z5lpp!+o{Q&yCZU0BX|)iJp1(-IGcl(XIM%GQZAth4$a8ir-bgaQ^XVCQ@)Or=**cJ zVyBAigV0;lV-Z8MHGGfk&P|96gVXOg7}s?)L_ELeJ@O4~+&}vMLw8eDTm5;ayjXr( z|H!XNNLfLR$U5n*4zc;T+oke->s5=5KTH%f?iuUqW2qN^8z%vsm1+ZRsu*#)prxpmSV&jON9V*U8hCE6isQEzxAqNvavhr zny52o=$6`HwmX&%jyA$c1BT)nyum)A=3^44V`b7F(*BlW+hpx|anFZ%Ndph!8!uMa z5ct^Bt|Em1hzND2V3IB&qJ?rV8tzjSVmJK= z+hWi%a6ouoV_cCqrF21nnGqo3SyMeRi7^)@}|nezUH< 
zar&UajCfEWhg!}f$P1k*xlj?xU}sAA6da1Z2BVs2oc7|L7m~L(8g{mo?A^CK7# zI=!dRcbLef3srXOy~hy>F=cGK2_{(1PG>^@qQwkO>%nQDk<{KcG2o} zz2})j-6#%`ree7FGC0)KqSRj*+ukvI7IF``rhLdSpEzzz<@MiqphwiTFPmPUtx%D= z5XUhdQi^1uIs7zRkQl=)wpc+r?bIU4=(OGva+mx>zf$k!Ee*cJ;T>!s)-r2Qe|q&5 ziDa(9_w6k!GeIm`lTxYgavRc$k&C?+X7Yf0%pd-_#xKlo0sf7_wmCi@!$>z8+|Q)r zDL7uJ6$a;~YMTD0sq@SS4|Fhx8O8t6byC0;xuRAXgh9}(;7bw6Pycjie+)95!Ew3q zGwQy6@Gl+6qjM~Kg+O|>hvBS0Y^#!bI=a$qFw9%?yCMMX51I=#mMWX%rw7qjjf>|x z9`L3>K<3uSm`O<&723$@5k%fBAlp2hxzvA!IO%vfEr#mR8*JlL!ReodpTXN?L0Mh$ zCj>vqcqZyuu34m7480AJKv|ZAM_^w~l z<5rGpw|xDX#Vo!mOe^>%vwRUZ4)=a*Q(4#iHk}i)c(c@TFKA?p9wKNr<52~=H}%1~ zwr`z238M^sOZ(g7H@#jW9~YLmAWWcI_h&A;Avf~P_KtSy*y#)%I?%p}z9S5!Edwap z@r)Zf>Oa0aLln28Tt48W`S;>osxXW(hDmAv- zo$X}lKqgxg~=fj=c( z)*d?%XAyX0^ZnQzV}qH-JJ{{l+KRuHW7)KKap89xGGcL?wc)muopEs-d_)TjZ-8IVw@m-K0>>2Ngw4_Dw|F=AAKjR@~Xl%QC z@-pE5u)X%JZ*m)dhSU2!4{^ABT^pZo>QLdfNI$EuslUBLvsg3n_Fyv*41co2FZwsD z9*<-Wh{7wTxM`i=#y@4iuOSYR7?#X=^x$Ko1iz?Syc0Gt{w$khl!eu-7={rNF_ZWk zknpGdHOn5~7hKiasXxJv`SRiunR&QK89Ou!kVWdp(#BI0rQL%u=%oBMAKijQQuf=!w?%A*>bS}XL^>=sH2A-;& zF3KcA6SOjJMbBqrF2h73=T<)mSJ#EuioY|M4Yb+XW73Kv>lug6U!c)_WtH6MCi9?kQCa>e_>s?;l{u$Lkj#KQJhv(VZ zS^s4+dE#nFy_+)m6@r(BS+&bp6A*dNqb`Te($k1ztC3E5i8E6qrgJVFWU3hNk zGchM4N{GgyAL>5!ANG5F*+{e!Zn&2pw}ZNZ?vt+_?uVqUJ5Y4~rSe}_!XK9RtGGgX z>&wp_eK+>bkLTG?IwNdQB7|87F5DiEJtQ!>_FEJ!x=e!bXO7@8{b*_JJ|7tD z9pvhAaOQYzQ;m)i=1Lv51xh_tbaRzGDHxK zut1|?Nf!6MW82(I>|teRC5KF$lVO4^hyxC-@xsA#Z`rMg4lsF3Cxmxc7<)OgJT``; z-}e^=h>|Um0+nuzuaQs*qs?OYj|_OZjhM&jz8E+0dB-2cc=wHAc*mrN>#X$mLa&by zMz(ACK8VJR@^1U?Ymieju=aPt{;y=v+ropZ>l(!^8Fpl^j7kpAUR4XF2od5%?d=Ty zSCQj}{!l%T53#?T4iZI>o2B2=TAsT3_1n1!O5mW$T>DL%=jlD-i~Igc@ESigy-x*j zO_gT29lX%g2c$XcoEOOxO7%8?#`2ih8HWoX;T$R9Hx`ga1sb*G_I7GwvdbF%ce40x zX(^}WomPmv8j;$XeH$*=2S5J2eKbgTeI))^rieJ_6I9i9=N#OKdg z&eB{#;1QxxC@b5ZwDw!E719LC5ix`uP7oLCzb~ z>EMkWdFOKTh3K7ODV@XEXo@}k5sKoyzAA8$E|w1p1Vb~qXO28>iedC?Mv3#KPLKP( zUpbl2em{r!z=rqgGuu8L!&^jdpinHzYS1LTAZv^Ad4 zV-c4F2@7K8J*3~qkEanO0z;+;53XjHP$reVE8vkJwtJ={=(REZPT)ZWDbrjwP^_Gu 
zV!^Yr%xYD zqx~ZB{!F<(T6)__n|6_){jpcU^QvoO0kL#1{2?8F>`hzIUy_pgoRda=@TR>VtgiAj z%yqYz$dcs-jW}qk3ZqD3CLJkP7^xx>MqSXNPQNU*41OMt4gbvFy}%(5zoLeswJz_; zH&npCYD7VD!X1;_3>;tVSASSDY(Kth2wHEVEZ6$@gg+KWG@P-#V8tsLlI_lg>r$icaSL7wlI1d5fv#txP>EvLo<#>)J1Jt#N0p9$7XATUNjb~HsTe>vdqv;FrV``(feqMFl=Jw6w2HExqa)%<74pp}bvw*4aXVA4*3U*2nf ze&^-IVxOm-O5>p4QJ6vr9w%fNiMX_ZF|RsSL%FQR@FURDv7$7o(c(1OoseHdGdUs_ z8fJ^$ePIzt$=8z@zQ0WeG1BXGS7Bpdzi04`c5m4bk+TnUXzc>44Ay30c5MA-TYA7P z;mD~Z>uVh3Um$RB+y;eg9B4lZxk;6N6>z8pb6_?v=>i)vk735@RpqFPvFhL`7jCmp} zuS+mJ;on8A-%Ze+dTPNCd+?@{tOsve5XB^{h}>ZW5Ju;sE>`2-WGBdYpd7Hc+6{C6KR&U69c9N~iHG)?nVOh38*(}rmIjDZ z@4VndV1i}Y&g9(pc|JH?uTfB!Ur%6J_YuDKO8!1>UNXY~ueA9vTP&Dd^>`g)Lb~Og z9I>*d)Hm}Zj^JuPdaN~`zJR_Beoww%y?fjI(+1H*k90cxICRSge}KP%_cp@QJX+b*HW_gJ4-!SpYVV2vSSSJVa4`lX8B(iz$^ z*?IG}Bti6&H7nFCFUwn$xITkn{1^TWsa)T9eP%bK^_*ciz!g)|vBRjvGQ%#Nq)pm1 z%yC=HT+`*lv*MlcMua-}Os=#0-B!q9vKzmR4o}?9WPIz;3`eB}`)1+wIr_R9Qm%*P z@I<)Jp5sm{0oYeu+sw(=ThCRjL-W=~E2}m*fgmSAkLRy#Vf+a6a72E5<2fP-Q1 zlf8dWCB3?>;pvef-4iC#OTrOclgw$`7w&mG*>Sryw|qWr=B{NP)nw`ai$xni_Iut% zL^2(9s2&qFPL6uRK=G3aUeITB93RKq!JOBT7eZNCK!4HHJAGaUvJ~A9XW8+o;wXZD z2kiZKJbc~YyBGwU8m@!C)dX38bA5KSh1P+(Pk8OSA;F-wy>xV^)HE%>@x;U+t}d*N zixr9IxAl!A#O8YT^Sa3JLAc~N#l&)2)9K7Eln$<{X8-sRCfMl*gvOiau!n8WQ1;zdQb4EKN?#sxFdyyO1y;t$ClK3A` zuY#h(qiaz_N@m~fJF~vMH3zx3$E5qV&g~Ky1R|30(geTn9iGIJw|?E*D?%fqZ@@DG zvG(=W9%n0OKde+-)+zIdSBG!o9)D-9uXwm_^3AN%0cB9X;B0Eh`-rzOnW_^`E;9CyF=4Ple7;eQ_7~SM=vSp9`0gF`BqerfiCn0v?6l7>6^6j+ zt8cwm>08nd6$&5{)=SA4f%(e`kKE=^sab! 
zA6yPI9hBxgFoU zlD$s%-;CsMS%=HYCJS?S>%*M+j>~UN`m;7!7#Lok!Xcact03Zs_it7a!VdOuUjBoJ zbC7IM+WDwx48s;4?jSv~HQP1tz)r`;(R=?njLR}1h$>!m`l_7V9muI>M!t<_F!zO) zS6@T^7vH5r2g7YjC&(p+V{;tLvEfh*v|L@Pz30~^$eP>VAM$CyES>C+V~L$-+jiw?-i^jWPLYhzfHBO%3ySfeLWAQ>Kby|$K+QbICpx=U z8l3MAFFXpWqF}l=+)#NqWa0TyXUtRe#IT0Bb9ZBox0%9((*sB?#(q=|yQ)6*$LW^H z?5w$l_wUncj++hP+D)cvzX?`Gf0T87pmn3)9~wkAmr31}`u81g%I1zqX-!W7voRQ^ zXk&M4q8*e3=ZNw1d55w66AHbS3crgZbQ>MEQ$78VMfJPlGR;DgcwW0ox?m&(pAVbs7r5SS z68P`8e&<(+U301fu=nRt7{xbK;r!}?1R5ahPNOWRr)XH7quYmtmP2K7njEmf#o;tL zGpV3@KUbc&i<|F#a3El3Y?!<|zO+~ottg1;mSGGRd5ZPw8D$tGdx!6CbrFoh zdpl|~Hp8zRPzr_EURJWoailX|{S9A^`}LZVh4Mk)Mgx~+0iIa_CCIr8ljuING1f_E ztiHU8%r=PLP`@{L?c2{WE-MY)fOE`jQ@qu6!K1|;W4qb?U930F=d1dq&AWDLZ=SwW zkbirbytHzcrn&XA)=A|ZK$GKd187(){RdoZ-qe#izi`FZV zLiKhIb+N2{M9~OAtba(vde&MmgVR~Y_J;i4opn-}3(+9urc35EtlxLDzt!fk9z8R& zUk#pb-UJ3J>9K-NnqkjvNI2Sk7pf83oDALXd+za)^Iyqnvy=!aZq0t^Gb*k+y{A>L zUZz~d)w{l7;Kz*IJ!6=deCO3n&Yj>O?nP;-d1Fek5})f$jO2@I#WMP zHY)M6M{=`yh=4`7Bp?~^&!}hdT#@uI<*Ykg{9aw0c+IY1d-_u8Yu^oqoTW;}EE+Nz9?q+Acd0l5Uqxjb@HGUYfG$qtYhjYk` zo%cSWW_r+_db2uKJA7ezs^?h6?~?n!0e$B@NRO2-!C2B8^j;#f(dYK-ZliT%8^0E+ zYkF~qU|To2+rweW43GqX$EkUn_;Hwd?k4iDt5%7BMdvwK;-dKMa68T|#DvyY3yMqE zS&m`8xq#k`bq%AE{b{_DIUNFIJfK&i!xNnou+lSsBUgl`gV;qPq ziLwiQ&?itTLsYv=3gJZ!HXM4bkJHqo}3+B7x?DcxcU<~!K>fwpd zorQG+IhI+KLV-QTuG*_Uopuhuy`>Co{+YooTOGo(P)>1xxO7opY3CjAL_V_y9x-7Y|SXka~ zA|39+4gJ$W#NSxzc{ppz%GVao$91hv+Jy}~ReWsCV;1G`TPE#SaB;SrZ;o`ic#6gG zrY-Ft-`2vLQeOKEdQ%;8jbEuB^S(76llSgryG#iXEdFMBmik^+3)Eit?I##yFO<}r zJL{Q-+)X?(n0O4*XL8vwzj(emfumE2S-Qe{a};WHV(#??cCJD;$#UGLM|ZZ(_Evn- zz8GMF+h#HfER<0(xRrFuBI7G3$&j(`&66(qB8D>-(u%-5HYa?^f$L^(cKzJ;*N;zJ zJG}Ip^gp%30^fqvD>tu7Sn8y;+VAfCHRhO{+})2(wetfwnR2ACB%P#RgkZn4hf9Xq zKihDyG}7pYPQi$|`y(;VOe*B39k*IpFfq1c7bp}$%+3@&Aq4Mt`oPcluX&w=h^W3k zGVkM9;{Eu|-hw`?M_KuHce|>U5v1-k68*|FT~?#q4bG$B}}x@lMua0*PUYh{CC-rWpIX zWG?YTug}ZxgE+Tyr7m24aW$-yeP7jumgM{1Xsx^?`z;MbICHokrl+!Bs0oNc zP-0jSApRm|Bm6WUtB~6bs=nDBa~I}&(BJJ{p*OAaX4IK~ec*aKKPzX}S`G6ck=)Fl 
z3K#yTb`AXmFd@)DoFg7n2Z#zoL+2U$K&!rQg`Bsg{vJvmbLJyO?}5|8fuMhfRH6L8 zrWoRO-UhVZp9xj?>?I`3dd0r4$e)?s_reCU#M`lIBjwN^bMM86j(#oRnf346+0BW` z?iIhB1hX*d(ERD4*-aLG5<{3af)`G|3HiiAUe|T=xLVEfkTK>SF--&NIR-Cx8R8H| z4#qmV+%aNEqU^Gsn}_3_2Xxip_67IE(rcT1bh3wEDk>N4#v$X?ydB1fJf@Y zi{G4Lml?Mjq(69s5bRgz@I?|ToPlr;Ss|~x-a)@*BHQe24s#jAr zo*RsxM=qYLIaIs%+-Ft#)O34|m+1M_15SN4pULsBI2~P5Q6)E_poIv;iaw=nIA)SG zyde`f^*M_V2;PLO5=li*%~=lyd$D)-kafp#m|Un^bT$xs$~2i`Hwl3jjnbV2JpDt50G+ZXxFWKQp)e^F;9_Y`#;JEVV@ek3QwRJYg zlqWv>c-D5pov*Ju8I(?d`Xjjg! z)UR!RqWHOJ$?>`@>j|T5#?{8-IKd60c~fkeOjvb%POVOromx|zHXS|lRQgFfdh2xc zu4s=nFJzzAF#SB@hnnPs+P063buVwZ)vO|SJx-qg$7$ZXz1aLR&QW*otG798wXQ?o z-Qa&1Zl3twKyCA;=%k?&d|BAz){u&s8f7RUwx`BaufoIJHkB~=*)AnQ({2!w1aIrT z<~@$v1{&#OGhEQtR}bS`v((GAWdbBbK4Q-z&UX2$X@epz*nW2L+PM&~ado8JhM@gZ zKNxIdC!V~!>m6n70`I7&3Lx{BZu-TZVy?0`c5Cunbot~e-gMY8J3msnJJ(H|b9(?< zK&8JPpRJ4ej;|MK@KlDs=i&a&UQ3cTYqRg?jRbv~KU^Pu;j2VU za2cx-S`6)T1&+(j7tX!17eY=e(1!IA+>6}Bq<>Ec{@gh8HCV*I#oEA*lZur5cJH2E z#RuQ*?@7J0)=L+Dd<6j6-4D&_wF$aI<1=$HwaT@gZSm&J?ca*KM3%4Q@p^1sR4}-h z86`P4eEf!spxIU_GZ0yj`c6g0R{8^Td?63Ao1}~T%?EMn_Yn5P{l-`hlQd@AZfJ2n*|~@IbqTX& zY^{o?!L#XkbO{$<8sET=#4A#s)42>sw)GArV|s}3TV87q2uyli*T3GdPs84mpQyQT z`|Ye@0`Y%D?Y)}n3z`t-?UNYGRJz=>L^)K=Us_;a3~tLYIis75KbiG?24z#E(Q!`S z6EcJPZSI+`8r$*L_MC|F`PS#QR~d#L{{1KqfME9g$XQzM8UwTNZYbj&JV&*p`)9qh z+B|4DyKSMKF;$3Yy-#fNOq@IYbr{SpxtQmAxW4;c4)8b|8sp6C+pz>2T&}Rg@zi1= z%|J~Ye;`Xl`-<3+=%Z?>4xM(Qo^b^RA_xeM(xgE*YP5NX3!}oWCNy~IZumcQk_<6E zJAZbv(CDW>7u2{^CvoaFwM5B2$b6-ATI$!FO`f2%)PBvt^i)sFiNdy&hq%3*dk&rt zp0Hxwy`9%;G4YFp(%7;P=X>s=exp+oN+IzLebWfTUXmf9+A|*rPn*GLs1B9!BP^zn zf-l6imb;YsW><7seJCT}nwWu>O_f<)6@#p}zmV2+p1ky zRjKPWZxY-iNJZNYvexeKZn|L>*|26d%2tQQxxn%x_B(&m^ay9=%)ewC3$62QGeELU z++KfefQh=hdt@g|)|TcN7@O3cJ}uSo=RW5F+-7^-Xh+UBD;Uv2LVaMCN#GVC{b~n` zO<}OY{#pb(`a))z`-5Ip+zA%t>W$%_)gTQOV(ffs{=_6<)^5FZ-3;BeM1kvSs?AkW zJjCocI=%gQU2?{_z4(T)Dg4L0EA+3$%2oaR`pgrL6O(?pvnyHtSrjt#@anwkduSS+ z766hOo>%VJdBfdY^gDDO>CIuXx1m z!5$A6;A#A^US?S@>hWDGd%eGtSX}dkqtOea=$OdEO48aR_0{>a=h(aLO7z>V)$c4L 
z@S@gJ(Z~=$H-^MTh)3$1hENwWU66yWd7^na8H*d*gF>;|uyC zM@&67NV{dJzL}vK#I|D-X4KMf->R(^V1fR`AcjBjX6F)xrMc*}NSkSsR4=lERtujM zv|@MSxrQun(7QGjqK`d0X=R37Cmmkx>yj=v1FeSCfp(bjH6T17R;>7c>o)C>y>u7j zZw^t?W(1FHwnoT*7uPUm{?{6{(*VZ>5htxow*`*%Fr-*b;1QDwy=cKDy>Z zMBfwbpE{w)regPm$!W(u5%WtBwjDINjfuhONDt{E(2QXgVDrUHZ;Q{0hcGKmkA!u0 zTL$-Z$7yKA(E7;TJV{IluA44DkM;G`hZ}YqVn@<;S6RtaNd#9jhrRER_Wcayo5I>E z5(U%|gyq~}g3MC)$Q&;Ty<;wowFXgbvlCBkx|mkJP-cPm)}au#K^g>@V`If^dh@Nk zjO?J;qADCXA?lxmXCAZ*i=(YJ%;#T$`Hco!F1H_!-F7kVp zYZnzIp-`A1dEeruZ*XLiLjJYx5s?V{SG%)i13PI0>3q(;&V9#`F|KiLUsO(~>bTa} zxc9c#SG76$`FjR3f*d(+`?4ndFeUjHEO3Nw2fkps>*1)eL0$gY9I8G4erb7utQLui@bb6lv*?nn0ge1^SNh z)YgaQ)1SYWbYddDEhui62gIo@;twheN)re`ldpG%b&?Px0U%A#x@IB}hHTV4jJ>uU zG`eqQI?@}6B#qh}ilPMnNi+W3{B;kdA0LI1U z(?Nf)jo$RwR+b*!eYZ2u?(&9f@Uy{^M;=e71M+QGq?TUdGErFcRcEZRfr3LgC|wHc zHmIZ^=7;EBen`t4QBr6en@hzW>s20wz+K{A{Jz8FGX5#9HJ`T5Ggn8z*U(%%D@#HW zx{@x?+uh>@p6xg&UXcAa6e2eFDfwfLE`J0*R-0PVZo7CfWLu7yay`RCkCM7LWc_P z`gI=8s)#sVbh*lFjITkt4lrA(k)qFWDD02Xhk&9puR>xJwiHY=nSI%sfh5@3h+l3a zgs00$gGqPq?A&?KwzPCgSRJzxtl43L!sVHswNF_%OVPo+_P@e%eO!4gRu93Bzn(4Q z4(>76K6z(@)RqVF;JIhjCIL5bA958S{UO6<#FKh;0wTm6nKs#R?gC&4D1mk`M7Pfp zYzkx+mCfonA{o>hbTE?#`P%0{Y}gE#>O)%*(;u3s97;`T0@x*^{6A@_m8_xk?mPEY zvp*8`YYUMj&X1xAb2f-55%zT!7x#M5Jz?+kL0R+th&OVq-W)oFnF7t{*H1mQO>|tZ zqsh+4x4e&WtnA)(oC*Eg-*0>(-%ICVL7xuV!zUOSr=qc(jF=sci*4TW%+D?LcVlF3#|?#q}y?b9RlG3lqjj-SNit z2*@v+tR(p=@NX}6Qw7rn{!I>Q{CqbO?%^`m84p)&b0H}qkV9Ry7u9y%8+G=egBR`B zA$6Wpz&q(+ie@;L>RVCUJAE@8LTl=_!S*DU5E##E$F_yl3=ypKex2U3+S`8dqiX+* z;6!-dShwDA-N4{5@_V>Co~g_mwQc)_eo6PB#KE+vZ9lV}Y&G2C`(f>#7YI{VoMMTV z-p5=|c6MYJZ5^ogA(pHzhHe>~PwBh{PSuO0hfpqcSs91TA{JEtqhAp&-F3a@I0z%q z??2Ul#&q6}4o@339B_{bJ(Fw0%ZbNzPGZ=Oi!W>0)zp4OdpLSntG zoFXYI(x#S5!b<*(%r~q&LQIj zG3)Zl=-87o_3BuPG~~Mrag`W6AWb@EFkH#uS2CNq2)YD+z zWCnacqbCFVe@MGXpV6ziNsD0*EdtS`ZCaB?>U?*d4_>+bEbV16X8nXj$5?BVkForu zYd${(#t|Y<0zs}{lonFkbj_&1qOfTFV z(wYletdj`uIum$_2D5;@&==+})LaZJv4(EguCu7M!B-BL%|{~|AtQCgt?0jL21uWn 
z`>?wYKXRS_$`2al&>w_vRU!IYp1G!^Lg{^jYnx{U33q9DF|B_u+T(Jq`b*FpBLL{X z5LrPh1+ihndrI5C-tG)28uT!Td2?n5lLoR?fQlw{oeW)kFWJQ<+N+DG68Y2d-2~Zd zoswLRQQSNKREGV^?lZ0ae9(UyoQ|?j++$A7J0EQMpM5LladaHAH~X$1aS&v+(O~AD zA-Gk2PGLnK40?=gD;w>b>pagj<|FA*G&p(HK z=N10{p~yoC{-WY>;U7!+YhGu0{%H@^Z+#P;XP>2@AK>w>rG*aO!r(}{A_jP zLtKU#?4J1>RTNfd%AfDXUlZofH=Zx)>OCL!#{+ex)NA+nhvQkqD1WPg>we}xr?q$1 zTJU8oHYVdYEXVFJSG7_8g{wMqrSCew(?D%wKcoj^EPc~kNBRD@=ki-pKpm{H%&%pF|ykJ`UG#CLzFjd|VTzjcE>f4bpt zXnXud-v@D@u|Mhje;1^6uJk$?UuC_g%`-Pr?t4Q9kMjGsH6+{>U?n0X}Y zFw)HOF<|F4!C&k5a_X;lXLd=;IF9Z+4x220p=AHaa4C4tDGqk%}TFQq9VP* z@y?k)I%VZI9-LnEIUr*oLog~lP^-J9Fi$np{PX^MI0a_ zsdS)pNN9FUo3Ut1HHTe9W z9;cpt(D2^!s;c@~RHb;Z^-ZHT+@gB_JPk8RLt<}yJR`sLq%-hXyZt*?!t^ZZMAJO2%?r(Zvq=ew|-6kp5N5iu037D~#6 zMS*P#7^)3Q+OVx3$LIF{E%@p0dilJ}{qfs9OimoWQ1O;e2^wCFA^*sKz!73GtnJc| zYxFt2`uNbBWpB50tOQX2ShMVaWHb#@6pTeHNl8?c6e&RzK@&<8B@`tQ1vFDlB2z&~ z(-0JnNaQUGLeWDI1VtedQcx6>lmtK&kswl~6;TZVKrBF2BG3twkP#>pg)~(S6)Y4} zLrnlx0#K9$NE9>_fl)*hr6EW(1tUr;OA?JVG!!KOG*YWbP*Tu8mxhpaM4MnTQXry+ zs2WkAprDGdNBJLkrJC@R%UGOSZ6Ofk$);$-9a=Tc703Idu(f74%s z&+8@OS)Apy-uc>ZfRu|lCq-vO7WudhGkW}%r%r9 zrGph-dB+q-bRWyxaOD3_-P@eM2kaT?V@$li_V&sF}C{XI4ces^63f5*x7SC@{4dene7k-Z zrjn)~*KNN1X)xTrpA$L$ncO6(zW9kBL=o> zPfFgxMW*d8%r=*65O0P$mo9&G1UvS_JPJ$$VZ+d1iP^Ol1nC5Qn%F1ayh<4e#@HpQ z_J6>#I7VaC@c=%JdDg-ujqs)4xaA7$&`|W}Z26E+6)k3q(7K?7l&iJWKt+y$9rK>{`|f*m z^V{2+x&c3khA1} zOMiy_J-Xa4wqDuzH`n&o-2PwLzaP1OU-tiR_RDLo3jHL$1hZV%mHn!O6F~+I0jlcv z@-J|8=J$xBlKkm(@D{O?)Kwb3+fH5_miBYg(pZ73>B)2I^}?~n#T*&zs>;`zZuLZK z&9gk`Cy}4{I+#%0V!SRhtJai=Xo-i7anoM)i-nPWY6!(hopmf0bDd7~xBZxmhnf!U z|LQ>aaQbvMN2djmEMdFf&xxPB+$y!xf5Yy`=j+*@^W*=2_J3dff6rL|Tlf0Je!r*v z&FkR%h3W1zVCV1i{67Of#;V`1{$6KK`h5R*`RA8`{5+=}d4fLvJ)k*J(-yYMsYWoA zQrAqvc^_Y0=B=J(o8@7Xxen@z(`1ddFG?aGbo%bE|9exJJ?+nlTwJ`xWrrMJf+|4e z(5Aqr_U`iDZ>@EmG7~)Jq~luVkh#LSIB{=$M3{^-Q=H~8*G}8L>jA_Knm1c?F!;Ob z;+oaowHJ*MQgJ)0&T65*6Z7%C=d;~>;?d;rp?~~+R}WQ+(k5N zthk9R(WbuQltZzatwK?liuT%J*6X>~`k8mo$$>|xeD(ET*AVsNZu!q?u;w 
z`)^q4L-x2}9-zOD$6Qrr_j&R_mu$FtZt5erE$D^Lq$<>lDMlFZv15eSS#IME@c4Vr z9cAA;e(H4i(MKIQ5aU%1{vXb!PJ(6WdNG}Kcik0^P9G>MtL43E+G8n&5!Ewjo|=uk z%IhH-S=UdfGJ0x~o@9*|j+BEDCr-2xcE$wVqgA&o=mg1W=I}f57VlE?+8&LG>j-Ncm=RQlv zrHR0votC|Fhe`3OXBUrD%7VI))jF+>RUDk+4e%>BzB9~ojFz6Ra! zsk6(ufVQ|9>Q-7Tb4jbcqO|KO2$kzu#0|AI)a%IOUR>ObbX}s9<3TKS`@4ggX*a-L zvpS2-t|T&&!$lX2JBhyf^*C%!&-(I4LcJ|Bdr=DVLt>rI=qiX{AGMqzx4cn<>7oQN z2LS5n#&Kk4tYNJ~ljPh*xNk=J#Zj4>6Fk1!MkF=UHOWR0sa_b_q{IDteB3HxJE?5y zUEnunOg~Gj^rh;yGOxIDU~IvWvpA*=Ii(9b>u`vWvP#(+PF_oyo?;Q{vyRwU)zr%f zh%XZRVEE9$V%4a<9<_D{N5P2ku3T>VROJBYICvOrC~-=8d!DqDSyAINPP)YEsJm&A zt74tkK)ePJF9exV!2$KtN_x&Vvy(b`WV%0gAj~>3nvvIJZ202m151yFM9~Y!D(~dn zcST;}gy6=Rz}C=e0Z(i}mg*y^XgiMgi$+f*_SuC%in^0Ld^wh)@7>vo-t*PxJgt^# zLKYJw4fq;fn>a-7%X_YORj`-C14IzZ9Fd|Ir3~9I_dz#(ztml!Fv63H$1}}SsT+7dI z4L`$Za81}beXX}s7|gwzDqK&p8>Y&W6s?0oWs-%5wlzmL=4T5Yq*H`XdR|TL)&Ig{ zuzs1mS3mLhfq3?undr$A81oZ3rli!(dBcLSbmaTIOJW64q3a&5wR*kWwtn#Yy*3*q zVhVwVUk0|^sg1ZeXUiSk&2CO%jl!dOjjz@b$Cf(KV(062J*;_zseZOZVt0`JN}?X& zwQ)tYHGGWNX&D?kJbnhwBuns^fH!XA*G zh1ZRtJ+RaVoHtHnqqf*xoc=2~b zuYL_!Xm3eV4mN#9STlR3f&M$i2Md$0dFTH6Xj8Wj=stP|+w}74cfnm}cd5s#@dk0zwCXar2{YDpfqG?_&-3q_z>L!yoHPw`@X=>CgPfL(Wcc-;bBfh@ zyRof=IlOsQCS%P_cN!QjhfA0iIR4f8g^-Zgx^LrCUv7^lMu zLTsNQ-xF|ejpJHG5#MAL{EZO`>=>>tbpxkzQ+cR8+&2M6So`$x=w+z5UbQ}rdE(`> z#9#Gxf-$cm=dP%jzld%EtKa!vNT^+E&91T)4Gq2b-_wb|#jXCC?lybK$@GM?)b}tk zd2?J*dt;6*Jr*Lchf?4QtltE>*MwZRnyG9z{lDGTrZ13$a%U-_q5bYG*y6%PKZwgb^y2SMqT@00m z-D>M`p%{hG%O^p%wU^_J@|ccc!qpBNljZT&X6ROMs$vZoeFga|hUCfwrzE0NX;w!X zGV+_V2Kf9YweLDf{BO|)+;FwuPaI)1%zJ&@es`O}mM#&>4F)dSg&FLR>TF~4S-Kam zDLE+8&H_aGKPfmBuWTO{-Zkt<9`CU}Kmo<5G`OD_>TQcz4#SVlExqI@YxE*p@_Xs}i3ZG_xsj ze(#%gRO>9;O6CufPA0Wz+P!1Tu*NX`oa!fWx|yAyXTbO?%#Iq)b9q(woOvE%1Xo>4 zd7-<7oSr43yJ-t?{O&m;na4)^`mAf4SLx*S)NAcXW#f0han^O~eP-k@^5_1)_X}J8 zHvfnU))u!!3-^s+&b@m7rB}?WpPqm4`^|N8j5G*VXAO{Ne}?M$k@w=;uggj1Km6ss zhwD7f#9;N_^=^Kz$j001ISxFaz~@QG$1#3FCfL(QdQfn1(}n+kn1KKC>syaM>)-yK ze;-TNzdT@vHadT$Ny3Hswr4L;Pci=cw=mIr=?X4;QqjLIzk2;w;q@MAzpa<`Woz)Q 
z<~v_YvgS9__76SN;^BsC&(}BBuT`~iK2CW_qd3#&Xlt!JaNhZg=3h&^ZzOyDYu=j! zdd#QHG_R%af%&hAmfr~5RfoO)d0(1eB_H=+Y!t!|=g-nRzGJVf;=bvI9t_mZa?+35 zy<%^I>UUI^*H>xM)}G@h-+bc4Hbvq$vEA{^2&<)-IosURWw1nouQz^Y=P#?~>v)$C zP9sk5ePxU&?ft`%zxuaWlt0U%-W2~APwd5Aq0i?(cNp3 z_lOhbHJCr7H=Cp5_k3%p$;bPuHe(%OW+0&q)@P}9@bb6c&zYrJ2jmU1e9q|K;+j_J z-{xEMQQv%@iZqAYe*aFx9@|+%Cy(ZH>E3@SHGjV^;h!{h9r9e)TR)}!eb@c}_H-FbpI;5`1mx!gw2?p2(e-s=}|>7o#i? zNm)Wl2-^rqsSk*ASjmau!628%el;=JkAgi~i6X;{NYMbCob%gk2e@H!1Dg)Mbg(gO z3CUx<=s%krHp}^oXdXQ8$A=^o`bpaQbJFDl{Ut+$^SnJX)1l_`EF-VqwiSM;CqU86 z2_oM?z_btrvtVp1jspNO{@D$0p6no-?f@44;rn&>?P~qTJ$k+V9AWT-`+vwjfALUf z`~NWyMzVn6!0h}oi#7U*@-e+cm@V=3^e+3Cq>1Yw%NxSSOnllB#=21$o18_>REVsp z!`iDhi_#N(7R*gzp7nUCb_8PV!W@kej8(Gag@(Bd~DD zFgRiTe_7O&vwXGSs4waJxrSu}f9;i6&0^BXUlC|C{8km*OhIzjV@|Ot{;PVN(GYUc z8Ij}tx~eh1gXTZ4cZ1GP;#(7>Jij2wu7ygMh0?~Hi=e_?{!H+|VIr@M&ToinaVF}S z_wV0%sjHU+IKbOxQZE|XiJ!cvUn4qz+Fw8X@6Y&atQ>#i|7ZD3u0Qn8_SgMC>H6+* zKI8izc*S?L&xXXyXu;u(|N4L4_x&-2`fC(FwDfoV|34%5y32j<`ZwoD!3O7~+lGiR zmu+56T_He(wyQVBm;nq;C4!mGv>|C(6R+aKM*-_@9I~~_S7!mO{BJ_;f0t9%bS^7h zClRtFX`7^~GOriraN_r9uj-uMtHb>sWc!Na21W**Asg!>RV z;we!Qi7OYG%nVkcp++6n5@&kCXAYw%tkvt?@`6 zqmMoKxM$5%E@#&|?yEcA*@NKq;#%?_I7d|P8ZC*89FfCE6Sc??vps^%ritb5dCsmK zne*Mhd%-kQa%T*C(> z>T6ej?(Q(0)7AK&Giw~(Vz}TSL_sV3Pp>7fael@D>W5Q8Zcfs9a-aYH{=ChoWO({m zzxdwu)l+pELR$j!p8qZS=y_ON&2Lxl)&_He%JALHU9hER+`CS*P(0RHyeqnW0u-5t zx*fn2(iJ+cCJQeZD=Wj!pl>xTE-cvp-QCS!h&jT*Zblsz4oa0Y`A<#Ci3`J0^@e^N z{#B^5fqU8x)WrDinui~iT^c@g*4fT9zfUk5m}nD|d@vzGH zeiyE7Dr%Ow>Bk=yqWP#3o6ZF10sJ?|`x%(-X~oxhxG}nBd#3-%+Fu1V^CH4??@^3M z#QC1?A4|)<$5d~k@>_p~uTI9ju6d8Dh!kOmt|u@e(wPWiM8FfiCjDXgp^cW8 z3_a@Y95Tv6pPWz$c%F3;NNj$XN9OfU3}|!G4*sG?RiWorx|?bl&b55LJ=S#j?H;;a zO_RIh+sQ8H$tI8Q$99XpNLuIA>uT0B;U1^qPS4b?-kMGNjyb8XBwUT@T-P-a=}S&s zd5CvoX`@J9c-7$7WwI5Yni9kuJCGy9{YUCvt?Mk9&IF$H_!`BPu~7}7^72Jz0~>xV zVYK~fwtUZAb|YN7oUZG34dcXO2ZeMmHHn`o`2&>t&Cihf$;7e_LGv3ujFa03BKd4( z%M+bu|L|rpjPNz9{-{RGW$8U$--S4e)7ZP?`Nw?Ye z9bNgZv$-K`L-q@}NPj`bx&FAFsplX1x_#9e>q(TMzJxq(ukLw5f*VI(EVJ=`*qNPS 
zQPdgB&q!7hx`kal5dkBPoJ-z7KJ_Jj`WKbyYgdO9qAkV9SXT-9cLVKPW?uk z`sicG`0wiluJ0aZVW4&D@)v!x^VeRthGy+9mqn~brKdt=IO%coSmp42)(l}o)AMsS z=L+At?|*pXZc4yTy=&ZvnOls+XP2k_Fn`RC<^cb^4|)G+AMzl8NvM?m^h5M~J?~$y zQ$2k?sp3CX&-g#=2OsD8^gsG`;>rKF{eRp3cm01IHviRzAM3U^@;~!%ANl*!{<9HK zNQ)6ch-TjVbNOHWcAO;Ta>HJ<|Gaby`<0!7#k%R6sA6-Zy1R7#V&XeA!$;jfU;mK+ zdCm6hUi->*>wjPC`@XsQ{^#(^4Xr;FLu7``7i6i|tm6(M>Z(^+{wymv!l9c{gm7pEd9WfxiG9znGeECv0qo4Mt#qsT* z8>XGa=SThNiMpVP9K+vGu(N*ae4Os{jQRfWB918I9Gr%CCj7oHe_77PwKumbrY-8+*fvg?TtH?-%T+bHh@bl=n1*++Nt z#mSlE7jYs!-f&>|yX@ONo=uaqE-qeKmPGb%r;f7MbzLyS_1#{&#SzxNy64RJcK8{0 z=*86aySRmRsPY@BmM5aJUFRk)X`7DDq~1=|RYj7z+uW6mQ$ir*WqA0vAp5$`7J9NwljXP zhF)*X;q}mLeRtHlzG@&P({Y)Zn8tD6m&JWuiEs5oW4pi^^iesjdh^Eb#0sFdjS&Mq z@m>Zmy*H*)-D)H7vG`e~j`8of z>8oy2$6af>I80|NF(MzOOtdQe#W+N?^6&ot&QIg{)waTjj6_tnv0}t#rbsLHzvcQH zKl++KWBvc_{+)OKO_{y_=GRW%PdvUK!*l)7*$@3cuI|8&g@31i^|zVs_QO6q0ssYC zzz+ZU`+6J$5NUzW{_Kx|!;(N7=l(-oUTqC-B0||4`Cb75lH)az6*C4-LoE~pX%#`K z1tcL9G$ML_NUa*{oC!Su|uZk{&;-e7{^wDRAr!1a;{JX z!i3^-ge4`4geq6=n7~sJLPQa94u6zH$O!Y~$CsuCO%#P9P-8<8&RbypPzn^{||Nc%-(ZXb<09?Vm{M~1FN+U4rE5US8b>DPTL2ZwM*4kEAWn3t^9-&oa1&3r_u{?yY{jrl7HgByoGZDA- z^*=85&6D>ZrN5opBp*7XKS%g7PKW=u*-LK2gic+q#oEko|LU!IokeQG!>PCQ5{-60 zfgKP2z(}i)>ni(k`!)G+PyCsD-=Y1VT>G!2-bDsrJmzNOp{=#>Z%r@qZLQeijO{BA_dcVtnox9zS%4te+r5Jl!3z{-WfcWv zeuM#p&_pE>1vFJ9Q$t8<{ych=)%yIpKF*EWP=U^HeoPr<2#V=RX^B!GFrz2&b+zcT zFzm}^(JhvYG8}>=Xd0*K^S4whxC$B@Fe5QaQh}m?qL`GTfruYC3?a3MQW-2rF$qBs zL?s0@QqWNoO(?5jBP#B7$fXKqCS}3KY$r>pHV$iCpgz1D_gZ?M4`$bzhkkzIuaHCRzi&eC8JpcQ3WLw&+FB<6nzHgyT$r zvI~^Ool_%D6ru8yQ8gtL($hzb5@Ip{( zBBB|pB&vvssE70k+(haFCW-~f42l|7kfwqqDGG^cq<0`0EhSKDN@~KfMI)4HR;qnX z$DYT~`s_b*jrC$Y#Wl2%5&X|x|3kAnKaaN`hhI~l2Dt5z>pb5FeV@Je+KUk$P5o5Q zemB>%C#AoWB&f)y7P3?n{pm1RCNW4+1RoU1B_~mo73!TBC@7xcr_aCs8q^&$JBN*# z=(pbWhf*jVRkSMzp`}GKLO`YoF)>pluKrHP$CJ*S3Sm(+;71vJzGS|pOaH{Aa0 z6;#QTAbrR|h`5y$HT}MrZ6_Q47;Nz&^;6fzLtVNRtYx&s1k#*L%tjGGlZjz2d)Vk` zh%ZdYYKV}Ef-0&ZB!ZvTW6-&wQT~pXh-Ok|6`d4RtEF-xE>*(nB}7z2#_6Y^`xNc^ 
zdy_Wb4fj5lLq7o6&$lv8gYP)|ASqv`+VlBtYKA{?&{6zBu_)j8Jf2zX&yaZj?c`DD z{-91gby?eQZHwr5ST}=ciQzNvID9$YR2T2 z2$~3|Mf%(Q`NP2=@i@Zhl%kTRFvL@as$Q&RY*AE10R0)!!~2HdAGA1yBzs~I9}Q6xRt&eN_POYQfymnUUZ+w%Ki zvo5{!km&W-rOW?XNPg>`p=lX?`Y)`xG%qQ})rdAF_TFn8>EEN|>QkI1h*f=mu9N== zGg$@J5>yvTDvOj66h%bSsFfZ4uDG<#Z7SnKfkbes0ytA53zQTmVbKkXYxiMx1vWah zowDg97fk63baDWSii@+k5ekV2LT4ho3xORnyJ8&wZP@`)4opaCwUGz$&G^;{zxMq4 z_B_Akmn4OBh$O1`*@*bSGlYd3N#t-i!&^NE16k#rfaRN8p%sipjeweUf>~8D$wViipvb z!lI(6IX;YKVh+N9Af2}M&$kYrIs1r!xgB@`4<6iHPvK|x4RB5JmW z6Ql{tghX=5A}R`w5l29oT5#cVpU}{O)z?ei^{lVbT3?Pt-nlH<5V)1A4V8&KYAypM>97%-L4kbkw zC_-VKVI+u#EV3A)im0fXCYXYhM<}WeL?uZ!oGOaCWv%~F(4eOlQdC7mQ58ok$#iRc za7ZpiRS|NEWy2MKfGiBQp-@i#hFjY(Sg~5zh^mT+q&)vGUvC=yb7=c(qKRL(#dUV+ z!SBBwhw`2NU1>H!eUcT<;gFd7p-?_JB-ufC=~V~Pw|8Nq)#XW{^&V?)T2s784gV<(xm{jlp&Bg zDUuX{{?LX%Y7q)hDFea)!T=~uLQ*MCKvCx_kdyyG2Os(N6zauw zz<)3QozM@2QIf!-2ipLTwg0}Mzy799UKlcFHZ}2K*_Kfa9_lnjDd?Z`afz0^U8$e@ zJku_mSV{CXoeB%LT+G7BPhvt)01tMxm#Tg7aVNqemNl*0tSa|3pe6>74nuaq-LXKo9Jo0Vm zu((lj?^ck|;9PZ%MK^CG;m9EK3q=V{BfQFJpoyY{p_mlp1p-p;--+z1gS`V0n_Bb* zha;S*fP=){K)6%LY8t`XAFGVZXfkxl97$7BTW8LHHUhs_^lSv``60j`AO5T-ULk8* zNELKF5n5ZyLUqamc!yl3vmGikzk@T}>qGn67ejKjnfEHt!iGUKv8@eg1- zn^^;^55a-GEX=-{Oq}^~ygbizhr%AG{Rt-=8|eI5(A<;or*t?|!EKF1L{(YfPKfq_ zh^-FD8>~1`LH<#N6Pc5u0~f$f7Bok5LFXLF)RFJqTolv*QArZeKiMXuP*``XqntP#U^~NjyHBT`|7L5uD$deGm~80u_5D+AK{7uE`;m$ zYX;&LnN~5erkQguJ8hAQjyPY5m4^P7XMMS_C;|v$_jpJTTs!yEgM(YHo!zg@+qZTd zJ*@m#Ve#Hza58oitr2pHDbs)HjVS>Q+&=7!Vr3z;X z4!ER!K=Lz=U*V-S>K_;Af%{D>Mt(gqKRYMd{=Ijd6fyi#s%L(1&Mi$v z7^@qF3_;~3K}Kn4qVJ8wIZ7~8*>%BuOJ?rUeN8>s zyI%RNiTz&tG4W~&OMwbv+jwH=(x3X=z3^5JR)vTo;BOk`^r@ z2;<3od3O_d?Q-Nh24wF}KZ3&qH*K~nny95LDj!AyCu#bYS*%LGU&wyK1-@vDxSUux zmazcmzmI1GYQQ3Moz3TMYU!Ov^^hoOQlAHdlk~9%*qsFwSl;R6oHeDwdM4-*y+bg8)-NtiKL+ zo$V(PPBT#IbWGdarF2a~j}m^z`BSVa2Z(y~QW}9(A9fX5gbidAQbSUtSZnnA@``Lh zeL7lc{#4!E=KN-AYHhi{L5{j$xSDmA8M9xHL#NQ@n;?2W3^<*`b^cri{k~^-`3KW&hJKwpH58a?PfK*0)%Io8`U;hiy z=x)N@9=`f-^8En>V1FPsjwGJO-F{GM_D>xMcuH{q(I<~*p`R;%Yk!}4fut5`n%OpP 
z8evfSA&6{0y@%MaGojL_A>M7lqxSm4@;RrYj2`{CHA-B2q!p4~)L|UD!~E_zUGLkt z$CfTJw%jev%!# zXccsC+w?kg>}+eF>FXrt14+xoueS}u597#x$6 z!q1j-HO#O(hfI9^Z1IrsoVg+#Ch2DqYx3i5o*vd1FJAdN5)U16EImZ5s)5uiz8F-d z+``nirwDoPQm|bgOLmE+vK^`fSE$3$r^yF|Rf#8=PNuMup{;s>#zPVs))G#7QP+_cx2pYi|A|$?;?fSMK^!ZPHx0N^RlgxKNf}uHJhYznv2mta? z2ZQzBw^~iF?U3#^R}B$DAbsFHuXR&mMG*2{-@n>=L%LQphB0s3^4@9Wr$%p#40>~{ zH>ziAG4eUYQQah7TCz@IWbWjKKiN;Ik(m30H1u@~zU0ZP&z#>hF3+b#^M zo+lV6V*u1rq#@%ZtPM>RB`p~vko3$Q&oQZd<>J%{K!3i;H-Bt{!hI*sG^cqIkvm?Q zF%+m`O6ZX4NeoDFfqqq~Gl*~mhWMULbB!SmU8FXU5Av)oQ2Nd>>ewd)iW+u7Kv0D- z1qxEq4I+1(hLEHw8bN44p;J;0Nk%_9$5sGP#F3K{QjsJN#UBBn_SD5~9ni(moS3K60^ZC0uXIsRb# zh9ym}@cr>ntG>YK@R}f@h`fC*htOg|gr($sgG4wySC*3ytS(p>l?+UYo4&oUy#43o z$LQ{!lea_d#oM<>-|{drT+Ftg6gFf0Q)*^mRS{HFeW@$&@QPvb5fv0e_DUM~^UiuJ zG|c4D$HLRo4i{aJ+obCnbb;Y|C-z}uCG)I@%+2mWH z?)DEfRjpye-%}t3k?30BlOQeGv@tob;QnThPi=|q-h%h*<%V{?y=ScS2&Bj=Qm5|1thUvqpvH@W6fhlHhK@h>}K`mw`X>q8Q1UuAAysrY1kLgxj%# z1z>&l-1}x{ySJJ`|6LemEcd$GZ_kofTMTSaQ%%p=+19ZwqY*<)ZgHeFr>WFu5y(N6Ku2_tp^d{Cbf(MB)h1CWmBD^pNz0&=v&G^zEFr z)b23nsgXrQMF)|Z@g@Dcch(&W{n!Uzza+_KUmq;vuR4_SMVXdNn*>AZ%xlwM9Sy$* z)2Fch)6!Y+l-ict;+c(p?J^7m7>%SlR%8r>{_VT*uYCsHkmV!(%Pw3@L-Ri$yk)zG z>6cJeDQk}J_JVU+e%x8ZHcHCjrYwj9B7w^)Cy$KaN2N$oG^gzuEeq66p6QZ-@?Eyo z_&yK7S3OR2Fg}S+T>;7;DazVyIqfcbXwrH2rbFKCI7LkOmQ4*1v4UlRL^!9{HA~ae z@Z)a`m0HY05|5mwK>b)3**Vn=TSmN030+!Nd_G4x{PzdM%6zRPe}iwe6pLx4L~0m{ zQN&i(o=keAz&?QSyp!K7?`gMlI`z@Cpwf2S-YjC~2f5tiB?W+gkAJLSQlH|NPXq^g zV!XFBEbaoG3sjoSLrC#)`lkk2_h&us+*!w`aPe#f5@Mo#$WQLxAK_$f5|yM%C~Ab}G7eyJQiLFaLIQ-M zfFcriCM{6V%rrDEq`)Am7@b*Ehb7TD{hs1Q8SWPl1C>-L3Zr&OL`4-zO$$v0P*So& z_l%*arlJ{&peO{=YbqS45`8Qy=$h%a9T_zI+``UKH*q#aLTKafCiojql1CdqF)9I# z7*sb(Vx*Te`RG&mZef{&hcgEac+}r=olcGiDZ~^s`}_m2KOfNgC%~T;FBLy^nLR#` zo|5*Sq>oea)9_|@^h}?la*6q#=VLwCqi=7f`KOuBJTj{@4g@cYD2^K-ER=rkB&^1} zdhi(-yDF$SF;x$CPg_VD3ALvu=%kVd{Gt(fx=d5^x6`NnPf4s_@Cnk9y?i zeD3*p*_4a$K(u&&ZXv%(HJA%stRk!~tPE>T>>lK3wau*S_A0$Ir5) zRp@r{!O;R5#R=%|-j#5o#le_@BCJGR7ENTugjH%E38B$ZO(51EKYDouFh}i3q6?gZ 
znS^l@Y^;+IW~*Zk_dh*3<%OE69dv@_+mO;7=r@(~iQ6fcRCkm$I)&W408Yj>N?==p zlT()D)X=*rkwPkpNQxS=E-D;I9GK&*8FVHWZTOR%9WDzWq>v{ZalgFm+nvcDZ2gRO zM-Qf=#`A;s>w9_t&lk(}hIHGc-!5Y$z*- z59iJfVT$5P+H-~^-6RgIfl!-_JRorM&o*j9ppp-!ti&~_RuJ^S|5GHAj0FH$As?pd zPBFP~dgHXbBTO{%cN#$MZXLN(6S?y(EEc5f*L2217_uKs_&>bgdAG{Xg0hIO?fi|` zN-3kS6N2rlODuJVz)et~UVT)6Xn|%BpYgH!j!1uoKS$&xAM5V{#_e^w_GJi%(hXsP zI&DgM3m8I(KcX+SMloYU>7e$4B#-A9ZDoctczXPP1dp#;q%q9B z%%cg7oJHD6f|X!(yvUGE2`#o6c#R4^7d0V>>bJ$3n|=tkl?hS#KNX6VS_8upH$-jH48VH*?)?|sf5sm*Sp(1#AN#Wdav!vJ; zX5Z?`;k2+|n{jifQJcAB(%3yZ&T2OojB6JScEt|8J@XjR4S)AkgW?;chLHK%q~og~ zkWeQ^2IrgB^~~Va0bHsdU*8CN-oW-i{D6F*Jli>?M1F;q+(Z|ENrZ2cxL>9P2}8@G zAEeR|Qt)M8O|Dt7&TeZ|*yKSqgz@*#azGnb= z-oukWOqMZG1qH#HiG9rwgiQwZ0a1j(xO(d$xIGZ77%FpVYApPhjWeOUT#CAX6OMVP6gQR%GF(XYO z5?)B7{1|>1H>dj?TFbek3_%kP5hOTP{|3BEH1~F8r9EhFGfPidF)&KiL1OswIdFU< zQC7fM9u#Jch|>dR$Vf2Cms&2UV8?AuR;dOURO;IgMF34%^C>Wq5h_iS#@V3{0jUrz zrNm7bstDLIe-!x6D!e&-7A$<^{BAtuETTNi<_=+xw2L9cn0j#Gy-lfg46c0HzNlxa zbCT=inS{i-TJsiHO3hT_(uyh~Ia^rr&rH>Jva^HbGZ-kV=t}d@W07kN%9&@ZH#7sb zX@r+#70N&DI43UkE1fusslHi&>*>6;kDSMr?R|UuqJW~vQ$b1edmVe3g-k?HP@Ly# z^S787cl~h9S_~0Gzw_}S^!$5HJ5OI($6lmMML}FB0xCnasgJ1w8?qd>ujRDN+cWZ| z+3|)5j8PR#rLFSyC)M`Vf{&QEW(t~Y{ve`j{-poUttkFNAcfg4-}hucmd5!CC)o#b z$Mj&7w52Ijr6}q^$$mAx0kU9rR4RR=2}C&)xciKt&7M`MUw(aXit2?Du%DtB$-u*e z@ULKCaKj%naPdnmMNm={N{~zI7$nD9G1{-`HB^959UwV@$SDkw$WoDXRM1QYK!03n z2!qa0lbb08Bte0NB`OqTSrp0*sjXvej6OQRtS8&=X6+A5h9p=9Fl`hoQcG@mvPzxP zf5X!@ZoD3ub4LZM31(M2T%B_UNnL^O*Kv`~~Z zB@G2CL=6;_g$*du&<#?hEiFcP zbkO#JDQFUaD5UL})g9+}Tpni~XQ^w&h%^7P}S zIm9hWrDp`?6?>Fq3oNWX9@z_>v#(2JMkfDPz3%OhRz($cv~@N0_ULyX<(7Q? 
zA>$7F+*CdUNQjH=dl@Hy&7x!Z#W>caXOT{xxaXRX|ChTf(LOtl-0tM6XlIkB`b7Am zil{oY1ekJ?A^!Rw z>~fk()3WVW0YfPkj3_85V3`9XO+sn_&&16`&pLE!q4=^XMBJ%Ha@Ubkr+hgXC|YFY+|w45Pym44@Z=B|}f1;i<;f|5Th zT&U?OltD^SL6MUD!I$5WC$DF+WRz@$G2PQ3STIlF&^^9CzEWMZJM?kXr)Eqqg_Lq1 zNqWJkKF31V7=kd170W6R)J9*C>|8Wx}qU839=XlH`;*2}0da3_UGKd3@leq2#L!unI)T1J3OWsa+r&mTE|Q z%OId@6au6o@i@kJG*erY=UKM#?gDp_A1M4Y=Xps(^PjssA;(X*Y2eW%o4QC9QYkkb zh=!DGCTdi8z)&$31_J>RN=ic}g%83&WE9061yw}@2ZIblH2g!> z&mwg^2}(tZ3Z;Ol3NxgDK%8rOIS)GYZ0CP^yY~%myY%&#$e;7GY5u?@JNRB>MT&@-5t@E_65)xK2g6}-!?{2y|Yq;){vU1TY zk4Fa*qO^(e*GQO)-K0kdkyNs3%q$wE-YnU)k*P*H7)D)H~BJtWR?Jw7@tSY$T2ZaKPJl&~Gq zOPhs9)%1QCC;N(cPklNn3QtTG^{2-mZ$mzbIsnhp-`6yD)txt|ZaY}C>=x%y(i4%q zvUG6@xN^}YL*yZ5RHc^T&rY-^AizR`tj+b`)y$_l&+Pu*bKM*k z(xAAI$455@Q+aosp*eNt>{+(FM+zKF$4r-g&IRsf@Lo*20!gniWv=ZI7xX3KrVNmi~u}Caf2%A02 zF1^Tr!H9k2Lj6>t`ILv8%~KPKYc}RglEV_w(i)tuQ{FOiWG7tTP?X!wD}@ z3=#1bdIQlT2tvTiv-rcUxEi4%Jv@!aYP)6-R>1JaZRRI|?g7JU%&!xUBhH?Z*K?UU z42pM`in~N7il$nfa>ADsxT-AD!_CbMC&gq?6kjKMK`cY_EtUq1wsIrr;y5;o@F;Z4gH%R8Wg5zSojjY(rXKfGJJs&DbTJuqSS1}SO)4IC(mUR|5nNT>G;)e5&0)d0HjCb+>ePhtuPc4jkfn z40%E-5XcOGHzpH7MNpI;T+`m{mC{W)n&K0*wB(q;cpZ+D;cwHNomMEM6y4*&{PJ}a zh^gV|?(g=fpwS4T!C~vREQSme#JBGt_mtFy6ubaSc=(Cx`f!~1(t{xhcaN~(5CI$ zm#q1~>KX!z)H{N8g#xJOFxByDMu>eu<_D58xe;3A7pRAj@@_yRMAWRJI9&)4q4CFb z^*qVW(j8uVt={$Cc{#^(VfE*oxP9Aq5!29eRgnZ!Z`hF>)KV@03lw6m@zSN*vD(1s zbJoj&J)oK$j; znHk`)s2qM*?w{1WMfo4MmSz_ow1}+`%qYr~tj|lss@TmLh6YP?&qU?ctYVD)TGnS8 zb1a-qrKGbKi97?#|6on|L(Us;(I z5Jo8#7BCTtFPUb#dS|5O)L`Yaxp>K+6FR)9 za_-e)uNDjn*Av#$1i|v&cbs(8KC`5ith-APj~yYot1cH^LSb_Ar%Z(79DMf&n-{w* z?EYR|&It3DX&$?hCy`Fyb0R49yygL!N4OH%O+*Ci-k^A6w8+>b2{>OG!iRYd#pK!y->~byV_I1oO+aa#^y?_IMzZvSreJz^FANf#fqPo zeYcFE;UqJkP^Z+MdxAokmFD~OjvVi{B+I^{?QflSW>asD2;(e*kQNGR9LklCveU&o~;YLsF*(!Om5g7t@w` zOm-c}P^s2vj*YC;t}z-kbCfn3m4U>Vvr*lU@zCC#oHKg@~sWuLX+^(7dCds&gpECh}?KNKePZ$b6+l ze=o$DJKcVJ5@}r|eTGHDF$Q2#r&{2Mi3qVyrPi~mf}bKKex@d%3-RLno5c zAw&pomp*sJd%4#qz1Z{ix5dIamr{;URlv|yBC+kL)F{3+H7gBCSYY+?w2Qb@%Y(|U 
z6;(wQWY@_eTqi=v{AIwPsEdxMNy>V#yyv?KFn5pOmvM z`tJ`r!-7EM2_SF6;{%V3-N`a$`EWd>UoFYyFicX?xXN2n7@Ha{a|$Z8JoA_eD5J}% zjK+D!gX1-iDM4$SnLzRBw4JoP9KPPXc$QK1hjNwmhH&1aw0C-*NrYZaB!-pTM55vJ zYt5paLr&sjmeje6MJ`md+0E{o(vPnxku~ew)#__D@y79(3=H`lT^#WB3L-NrhbJ~U zAr5<`!lhl_-y`1_Gkp1++Vw|7)KIL;V%}8@LlWy^hrHaK<7t?zZ#5R1rfR{2B$-DR zp0Pz8kw(>f?al7#dyeKn))fm>rHSC`VV8*v5l*FRGSi5?bp?+o3JNDR@TqR}zFA)3 z>Ja-bLn>iD%?S1!2$={bdgd{#MHW4IJ+)~%ad~k#DIwL-`s!or3JxtbA^}O~$L_I<(D2;2Pc?+T+my>{1Y9EZ{6!jl9wOl8kD605zJ9#pS zwr374|*AsfgZI(?rG`iN>L+MhZiTF_cMuTFg%N!0EOqq%k;? z8RL;tGVYc**{>X8X6((H;z0%&%-UB379D1?_RT>QAIYCB7weE_`!5??am}VyoTo@Il^?cPEty!u~eK+rVKLCibGxZ zn;etfPTojG)oOdb9V6SkBL%6(KdXwJla&nGQD@T=C zXyb0tHusa8wadnZW)&9qG8X2VtC>);gSECdy6ff~CKZ>q%Oo;ZSi5+uF$*h687!}pN-+fpvufHV^w>6)3Ro5x(`K7avshx~ z@z+xjuM~4GJCz=@G9G*7=My`w>0KrE`1#^}*-D)r_Q_t`&e^T#msvA&I_D{`UOi4A zx0~B}cpYaF+vVquiRC+(WKkidtfyZ47{}NAleio#3o%L(jw6y zBWH6kWjUB)0>H#P}7r4AY6FWA1^OX$0Kf|=dGyfYz{ z9MLp$46Lj;&aY~RGn#J4tCo(Wz!Zp9=J#}UD?7t;8k3BZiCGk%PVG5!IDwObpL>4h z7F3m=J&2;KJ;wlW(@F@tN3TahAK>qSxeuI+TjAWMxRnLKiIiNcgcNh8=2yBE?)>*T zKvg<5Qzt^&`l+4^Q^NO64L}-_}LeXTuT?^7SV(0A&of34^zpOwAaB?Cl6OBcjd`@wlx+({z50LyKr=8Mnx(`0Z6e)t`1wzZh8MJ zCvnzFrPp!{H7n4l(DdNOUZ*z5F@m6ilg2Ds%HWokUFdh(7r;7?LrP$J=}kQzL*hdc zC{ptjlqo=msRp8wiqwHd;ddda)Z{b`Ej?4Ac)kyXM)=NdeLzouP2BE>Zf z8DT0KZyGXy$g79YIvag>+vh~ZM2CExw#|rj1(s-%YiDO-^_p@EUK#cC)Xpb`^XE8l&ckvTzcf8DVw0;F6#-Ttq@ZRJAWB*ik*0{2nnH$z zDB>DQX!6!-9mYbR8hoV}CNyI!=ekawA>@1R?_VIX=;v(Z1Lg~@6nsg=#5C%l4fMe838GV$%8F<`3x`=k zP|$|{Y)4QN6b9H<9JhYm?y`-hB@4~@_8X^nO-GPVmyD1gw6Y*XQK1z)A_DBT4cy@G zC{*LN;@a-uT+9v2D7(w4qh4nnRpr6U$lt!5;PVZ5<);%Z1=r(f>a|S9C(A5 zQc;R+qO8N};HWPx6a``^j6~v5I@ATqTb}8;m+juY&ayA6bce~YGY*Gu!2P|>%&ajF z*Kel2zq@Z;|4sSvf5S214x)VswH*tEVVJ@QJWN2{0sQOg9!;e#X}O>G?PE;eO~@G< zM0B}+L*ChBI#xtGe|<2RCKR=U+$xj?Qn1I0*6_LL(;zd+YiNo#6i0=PocJd@W*$)Z zoI|fUeZN1pn5c1__er~H|8J$9mE|q07B*ryMY`4bJenYMMh>v8vqV|}Qijc}5y1io z9vU{`wg{$KrI67e`iz}_4|i}Jci?t^)QRm_Q4{O$V}c=h-%uZep$}=@tx3u$yAFo7 
z$+J3deq8Z~MD;_K3S=2RWdW1v)S4L%{~O0>V0A-&mL*_<5mzgG`uB|+kVPo4*RYL*K+7dti(5+%)oB5wS0KV3biv@?po z-H2`>=JdebuFVe!bbXrM0Eg7=!Ub%AI*!t!l$dGa5O6t~7IEvL%|sgk;RBtavJMo8 zq*i+N%s6SLXQvNOoHWQM2d_(`&X(Zeb?bM5pp2uDgl;p)YH?QBhlHpAX;lUJ!o_Hf z`_;;>Gz9^enHA5{1AjyzYf-f0HeqiCfvU#0O&vEOsx=WGhpJrFg_HW^MXh0ZtZTyC zA{i5;G4HySj@3C#nlX$+I~oq@p3h!|?B;cj_QUP+nY0_e;B$zUgR0cUjp2x;xrWk} zRT_ZfIi0-2*kBacoWMA8|EFVXtk&_Gyv#6(7~HY!8yfq4_}$H#d+HyoiJ^{S0PKOY zBn@0EF&bdRkO(A)LNzIR^w9tdux`qodo9uUeeiw@1J+Vp=$gX)I$RC5%sz;vl5oPl zjrL6X_{pAq@UuAf+j1BY?8A3$Sl7l2^Vd&?BEZuDMzPrAuqOya{q<&tg&f}A%R|Cgqp zFV20QugvULa$7=_zRG{kpXK?8%t1fYoKYvm=>8`@Nm|(t_(dsXQ zhxX*aI38SOf}^H^-j~w6r^pJ4-t!Jfu9~U^Ggm!(y=0T5Lt-w>q?i710(Bet8FH+krV;Sc7GGvQ)-X!2s>d&VMs>mOxrp1*Mp@4suQEi1xlURO5z(ORFVo3 zys(({NzhKtCylrF@vc)(m&c+2o{l3;W;Q3-b`5KLrlK%gIDa5qRL{-r_upR+Uov+R z?jy6|!G{yJ>&z@aDj&U}1p}v6MVLO-djrRCn()!{qq~t)x9{Rj@wISu`$MBXrul{A-dry9m4_@B-wE28pb#48jU5?dojrCk%oHYRz`!&E{zMVeD<|bTNc_oA07e^DdaCt^G)~oQqR_Jjkm*TBHkN)eS|PZIutwF7p*G}{+3R>_Rdls z^`=`kMC)V(fK3E<_S8!6V1UcMnBk~6hZ_BDXmME=k&@q6`AI`gDGaLtCLa{e7Qsp> zt&T56jfbQ7Il9R5gLMnh7JCs|`o%^F$+D$#5f>gkJn*&e&q?<6@!lH_CL))aS{>ez z<=bBS2IC>WiwJjS_;%l>j;Vy==o1Wzhs1(>`8HQ(Sw{4SJiLO(M5$&P# zrRd+lS&{cwXF~@6IK#Qka}K4ffw``%i<<0NAPi(|=DC?EbS)Hb^yM=37gM*~b((@p5k|3}_16JHC6DFK6 z7D9^h&!fFw_4=FyU@hbCG1SmvwXOt5UAqqYR#S#8)h7&XZ)HjxAV04L-1;Wa6e0w_ znC{4}343nttG`NsdmPW((;V!_Xfe)kj3An=58?ZNTa(W-*^AfE(40HYBrE4HQETn; z99+{elT##yNC(Em-m^%;tS?D@sz)}nt%l1mj5a?zdbNnq(SIrrTjpI@{ppW{0Rar! zOT_p1*(73FXG6?#fx0%yZ$Ay9HI^PVvcsu%VcXlHD?3 zs|hx+IInME(=%9jck1D{RZcC=%CN~7AY_G3*ESm!Q1-X8Vf1V|vmh9H=JaUf=KhG? 
zo!*=myAF;sp`JQyMNm{XDLijZSXOK0z9_S8b}%_g%JoCG`-zI}PVaPM+*<-($mIJk zwPBXaSPgLugKUtBAZ&}lR!A(6+iy=wbItFj?)+_4W;Z#WovfWxqZ>o6sgCZ<(2zb) z$vh(L_jB6$PTQY90x0j_XisCjJ4=Q<;57SrXFR23o1{4thi^64Y~|CQyTC34oFHRrtEHFTSDS)x* zSOvvK!%v0WHn82mFmG(a8z>1P#F7ae5upa;8MI=w1CkTpxI_Lws1$O z%lNC@6~cMUtw~jY7i;8ebAC6dx7$epBS`C)Eu7HXUY(kCgm!QXDibwASYiU1 zYYdnKVN$y|4^-u0@!NpNciMMO{qKY{b5Ad1Hl+)-2PqFkdr;fwWlhpgEb@Uw zIw7~Gt|n$re%5zD^x}tOwe(D7%36B4uNMXwQifHfLK$-*@wcmsvkIK;0#EFrUdZ@r z&0(tb_Z%b1jC5d0B~(47Nd8>VzGnzxXJS_ljaZ#?Jv`t$G%$)ES?xXUWY<4kbnb84 zYrDqFATkm1I^CyM7(s$*Y&#%4@NIK`e=Ir&adRNC=CU@1CyxSFEtW?G? zsu;zr*I$njU_*I-m(@5Z`EB*42CD;8bvEYON$rBej33sWc=4FS5gGX0uoYt;8|NMg z9*xI_!0Ch=U(#SlC0K0W$E>%fR=KsThe@w67@bwaj^0{Mw0{Re4Tek)(xF-h>lJXq zK8rD~THkoV_imvK8MFyCn)BU~YK)vpnEZY`ddcD*Gt>Gxi?Xzbvai7@5Ko16Y`-07 zdNCQ?px0LD*$}`;IzKU9Wck*+tXp8?iT$pu&mdc5Z-@p22Hh!)6xP(wi-)^O*}K?y zNyFa!-Ccv}#WmsexiEc@K%!dlEKK>&w1#`nR;hRiZ=B6yg+b}8z6LSh#e9e zYL3*LvR@SF!fCr@-Yl#IVJv&CC`)mC9+(uzFRC-jg_eC4uU- ztc+f@T)W;)Ub7FWTgl+fQQT-qTN%P=8PLUVzWaqDp18=y8q zefo`)BoUcICf|z!I`zN5dGoK!ItO-m#44^^7%H+>h(6x$3C6WS4$P1xy&M^(l?0>- zbcB}87at-4lpyqY_sv}8?ppJeCExU3&lr z{WsoF_MxPNaElR-W&=VoFv-C6^vLvUo_POHih6L5)CYB7J$XMP2S%)ZM#}vX4#wSn ze;jL|tJxbE@rKb;TOnNp2{2)xu^Gx18Z>N6jUn)hV2czN#5FUYYnp*lvJ`dld$CWH zFTt65dkvcn#OdVQ>Q}5G1!(~W*F2O>h7Y4u98dOu@ZF@yc(hA)I%o)3<};zFutGa= z4xtcWc5>YqQrPiWX8(g#%Icqnl~d)l@gy3uNCI}{(Xt@+^c~=Qbznh#}L(v3{ z(E@hZvqIxwnnd=fWgjm7-%e)ISb-#R>Ik81+zkQIb_?%1ePb!U+EH*l^Loa1MW~_( zHiQ7b5SXg_VE3hdEC?C7?_gKKgq8ky;k)}G2llb|UU~JC#h}ZyLUPE4VgZF4GJ@&NNj=WmPxu!wvJeQo!BaqUHQPy z_p`H!J(-AdGX3ar?%pl2NT=+XY1c$mSRI>NxnE|$PLrD>_uq^Q?WmgaQLrH!QPIH< ziLjRn!~zf5%LQWSdZrEamZ$c`VuHaOwYWvH^EhS@Cav(QP+q?b>?O5@vu$(lO^cZr zMzI7!+nI>OeZ)n+@UH~C{&_vf<5nbj+_E?C@4533Hcy*?$gs%O5fpN!HO7F}(OUf* z7aHt*86@wU+a~ywPY;X_<F%0xgfxWP`${$V}p8g-7w#mb`XiCJ>Dx zv-HJ}*#p=ekVt%eoryg8q@lfSvp5L5UkxFq1gy*b%^ud%QUgAQlHY)pD=3c7j94sM=<+WKq!sB>5M5#j3;BDQZBj=If2!4z5c z>>e5_rtWU^;x2;uRS`U6>8br{uQ-f0YKs{!yb>#}pD-khK0~x}AQ7FntX~;ey*?<- 
z6xl=HuX&62>|dGJA&DN1z2dVs`RlpN=yR^JXQ7C9JdAwHb*XK3$TjI_T-c1rU_vPA zaJDm=&n0rIn^>=FsXFL4+;#HbFPJ^_*7YcN*RMMD<8JN6I+@g%8_~2%4UJ~ih0zzh zJvXC2SJ1yvpGp{s&L@l2=JdAx6Pbt+<1MU=QEz&Fa}&{AAbf@d_q@zn1@k=_uHL&l zX>;NG*1SB-@zLlrH>I|S_}I+mF`b##az{-Co7+NMWF@^l{P*toBU(V|3}_~_x^D_O zQd@h?#19!f!b{On`9qRX$D{rL)uI0FUr+2A0~P7_b$+oEfH8Cp#1Z%CKPTOR$L;mE z%x#=spUbGUxck`k>M{n$=rI02=)tu5JI0r99(ChRnWS*SQj-tMO=X*(r`7My+wexm z>b?CfdDpD#eyewN6?mXVzGu07 zI#oh4&kk)J{e3)FCh(kcUr(B21cJ}*QEyqM4j{>|k3wD@UZy18h9N|J7($-BYvyD* zd~N1?*E&^kBRfo==Tb+yRopp#wE4WT20gH8FS_%`-lyB%v)kyFjN&t?9^p{R?78kP z%LFq7Qnzy4c=l~gF!bsbG2#^l665;R4i~FdeKv=Ci4jScIn--Yag~1FB_blR6%UQn znryWSm1YJ;TM|7}^r8*K65T1ERa8ysr(rKeH&r*5Y2+mNOLYSitlkoH;`L*C{i}md zUnINn2lQ#N3b{}2NWI4mpu8s`T1x2Pu`sN?6m60b%k;xg6}PnaoOEv$^u| zt;^91(XA~dsR(Hv9!IEW=cU5^q4Priaw+ylJ5K)EHoV1IyjGjhntF2NF_QnYb{-Ep z@&8EXYuGs-rG>n(w?^L8@!;%xfN>#hDj$j{3n5W}9%B)6zdy}C!n&S~b)|CBxv%vh z$5crzN4kbNmYF^%=DI85c6fDLUoW1}nKw6?%5rio+;)Ug6z(q4=Doz--1;OI;E2!h z_R*twV7#^zLrZi0rwALMcxXUIZ4E@|di-m{_*&(ZRpT(lpC+bOnK2Us2ErV5)!BLO z`TTxoiLXC&-U#ewmi_Z-n)!23qtHDWN8yVASQMqM zB*sbWF=L78lpil_s*uo?D5!o$5~zqABo3xMMC_M}9s1F6+w{l$wfAR^w)_kMd@Frr z*paxykKx<<+Q!B5JH?T)vl&BVU}z{}gD5UJsDMFul1m+J&icmM1l&G`TeY3DGqSba z(cC1|gxpkN)I9bsFjUQ)|>6>0@mojgxdjL^DuD|D$tP|2jpuTl=RLy)e zsu05(;B^;hBygDA*E4>drhO*z@iUCLm3SYcAVW_Q->kwNq106nf4ubaRPK5Z=ZgU! 
zgfS*X$+PLLvsS5EB@+X?-fMqi!=R!9nLQu#+cQCv)0K(Pzn~y&teFQ zd~`k}SX=oV3Hmx}0)!y;EsD_;8JMgb*qfWU^`jXfB(I}xR=2)>{wHT}J(UGmr0UAC z5fM(bdF$e%W3)BFPHRODQDM)Gwck19h)Us3;&hgqPV$qr_~<14!8<@d zFFbR z^&tt)k-)wHN60&H3^xbj2VwdEdO(5wcpd+B4U`OG>QR;=Hf{!lAexqu4bTAB51Lml zgfmLC&;*O+Q%_ov4L72R7pdkIFz=Yw8X)PiNWIpT=4_NLC;j9Dz7b+bFVXA!SHY_- z&*RFk=h8BwvqABi{X53hH4R_+J~Mt-^cYQn2zfXNaXxB9Q zsITqro)?u_!q=D>?6+vrDD{SNG>1!-)$3S+i2fhG;_sEajnK??8U8<`)BDQD zww@4AqvfTSPC=etdKvS*{!`k)^Jr#Yavs44wfWTaHuZW>=HAVM_;IU@Kd^Ujyg!^J z$VK8oozJHF)>9_GZI5Vu-*?gNPigUbjcXWTc1+5Zb~mX|5lg5GlYBdUw(IXYrE%*# z_Za0{qr8dC@NHU-`+pQoYsY%^z1(xps-J#nR5V|ceI4q@`#mjMw$Hbqlom2U_f0+Q zKaQu>?nXS*t@#Yv>7UNWU2hZLCAAaHCWQ?Zoga0vXy27g_|x8Qn{HW~!MsiQ)dCMy zb_BUtgk?6m5Wyw*lfSY9?ZH>Av-1N*Jxd|~Tm&0BaS9H~UZy5UZ9sE|mk=p7qz@Va5{_8icnfSTA z?5uMSL|+UsoX@BT*qn!K;WQzXlY866`qq4mnflXx-*|fWLSUa4tG`Y~{(avN50&rJ zk4@fTp@sR8i5xh^`QOHBp2MDB5qF$o111{6kE~JXePh0h$s4Nrya3DL8Xq2}J)w%* zH;-3}k8kirH*F#C+8%}Jx2%jXHYV#Vb@Oqo?>Ji;zt*qrIzLB8UUk*)5#KeC`J6{P zhVQoS{wmGeAsfx{Lv4=i4AHG(zYVCbBjJyG;xW889-^+F>iUrL zeVk1JRg@3Y^C&j5!bemc^HBJyFY`~VZb~=ux2#W@&H_7=C~=ZQv^A*1ct-Lwy)lc8 zGJFx&cX^CsuCJ)paz<(X2&G>z(tRFT_Pb@RJ>t=RZT$#0`3~c>8yPT3@5f$*?)7(B zmEy~9({C}ILvwm-(U5WOr&s&ed2g+Ei0iLBr=~_dmoOeZ815S-v-BUcGlT5t5be%o z=8gVwHjl-)z>ae>to%y6VHqdo?`hO;HFn%Sv@5;hvQmjT^~}_HnKgd1h?}s~Lt@}& zgPHUgRiberlfEZ9&C6F=sKACIy(%`5k`#Epyo}2x_0nW*9gUB9)}iFaj_t^gs548%&`;XY;>Ue@)j{!+D39sN>XOAf zv1AMZ&oc#1HVwlcUrph`CkHJ>mh#<|uBFnxzE*^0abh#)dxGdLAw#?k zOmj_%eZE|g%qhG>;yzq-KBR9YCE}N+nb_WZ@%a~xA*}YopP17^zkHZHj3Bqkco@yg zM|P)7nDMt9tmbXg<0vNw?D+1{oOtq4$4N${VD(Vc32z^7L{sWMa$c-EylykrWuzO2 z-?ZcttMFq}xde{#w|cG2-QA#f@!v!%$nE~?Us{XYVUBu!(|VrFUgEx>A8(Ft7|wl8 zxtsb+I=%f>2sxj3zVohDGmopzC$4fT^A)zx&SZ^TE$ZyVHq$wVTEERU)ID{7EN9$l z%KGWd^xmZipu}y1wUMQ7O&uq16EfB3*LBy)=RDRi@7dSU_z`-lz|%eSlvJ90`j@!x z4ET%3q%U##7R&Z`->*JvPu}|Z&9)HrlsuKsP74b8RrLPj-&5~?-qEBQZ^ix`;~yU8sVk24O7)UGE|@Tm08@s;TR370)TK zNN1Tn*8n*7j&cS_9lh9-%)ili7x$YxUA=TDdvBu?sr2ac-k$S%)A^UjlbrobLWWQd zGSgXL(0ji#_vhE0e0y(whp%AReu7up~t^XfD7w+X;< 
zll6zo+q3i6gAnhQFqp@=)3d%TsEYk3Bnj;pp7@R-Vio|z4NPRjV0*V|iJJ5J(|lWf zLyRJ0>)j!h%I>jc&IpGpiGqHZCkM{gM=`K{MBU*2uy8i1{^#a4CYSJZYR~dUh=LSB zD7!El3aqhy`&5}RCh{LK^cfKdi8w1CpyMU)?}Rd!-d9=`Az-0Hz>q#s3_Qqk`YuV3 zkv-t{?{7^}Ta+=xCmA%#185r%%%uLr#X#y3uvLzcODeGH6k1tk_h~&a`^EDvQt`^l zIRC}C4I(ur@W9T$e%DHP%*qb5U2N1UtoikU`UH=h2SbBs1x z`r)sPdg7?{xyJuJ<`eswFMg|sa#-OcBV;^Z#XnDP+m4B$+95eJl9crd7#Kp6XdrV* z4`e2M!255&OE!BQe%KdbGcp}dYt(`JGt^`&L=9-*04D=K41bSa^4f~A5ZI9Ul2hBX z0DqsS#~MDnpOA6)(dW-htr8K@@`tPQE}i%FAp&BddB;vd$MB~wd6r?ty%zI{%Je@U z+1P1&8?THyN1L!euf_A^QYeTqiBq<(n0`GCr|Go={2mce1%c%0c>N1UV}tAK_kBI} z$@HU}R5rFXJ^xPt=qb#Khdc*w>e2HgttnBoewjYOXZRscZduP4up|uS9%UpKK zVrE8Bu|l`;dgt$%!}7xLwHL+<4kqgy61Vo#j6L(QL&jEWmu6gxASi#8-kb}JyFP`@ z&v1bzQ%vP6KUP4+Krts8zV4$s>xr2ur|wGzyQ@NX^|DA}pziFk!}G~Ou4ns2Z=qwO z)fNmYamptjncL6KB64sC89#IFum+&Tl3@)r6tvSEMVAPNBb4h|4yk|95-O;56cH2+ zQ&bU6{d0vu#N>?;#v$YMBo7Xcqo9(K6K9Z~atEQX#v<+X+VngC2IMCF0)iPle_Pbb zd~o1{Ur~{WCQ&uBN}jQik>swZbDmj%1J+HRpVuZ5N!5_^{StJP3Kkdp zU!z*U`D_P*clVM{9$~?U$J>t^Q|Sp|MY5O#dr6>(BmJ2itVsQ1!IQ7?dL5F8O-U;} z18T_;&y>lOuhlCNkPa72Mg2SN3YKYgJNve7=!ME59}*zq3Y6att1Aar=*`Px5R0V4 zfwWduI5@lOEN<0E-3^@_GZLqnp*V1obND(Fd`#@8E@Sp72Q ziP=>`96I4NNGbKs_8o+0Z#Y6a=Cx|7hUZ1vB%h<%7s{-zx0f@D%uEmMlp)Sdi6;BY z$$%Ub$dQo3tPd1EruhnR#}QREADw27C8mjo1K?PJ{=;A`V7|KUpXjv6N6)nAwcGf4 zszcrY!-#AB3#*at%kOnHx4p;SN6UHKb}@Cyz1VcdMKbCbPFleYkmQd_>kZk@QADa{ z|6-5Jr0ILz*Ya#nedC)v$5&n}SOv-t%gGXI>79c?)c5@+&2+HMIbNlU{)eUI~_yA^#socnD2fQ#q!r%Si##Jv1hY1XitM| zvx(IEB70hK`Zj?XXMs2xT}?Z}0W?m$u~owk%~I#dvr5o^i9XJI%J+1J9X4v zso9-RMkLQ9Vi8zHnXra<`y#v0KKHTf1I+i=yWYQMaj2eBdVQhBYDtNCeJeGY0tApC zVJ!5rTrse@xr##sT8;%IL{gG9zZ-0z@>8z}*S1i_PK+wCqnO)7&5U#VCG8j(ELqT^&zR?k_UXw1o)L zFWz#cX-)fy0n}jD1Q^R2_515+l=}uJ^0g5Z#2qT?0qJ6BV8z@N9p8(-aUTXo-5G%y zHx$I&GY!kOxhT^Q0VmsiA3=eDkQIoq`I`RaxA*%0Y(KJx(wgz1or@4rNSIa$eK9uF z6J-Q@Zy~_J9T7vNN8A3rYNV;(g*$!+@h<-oI+4l0}Bi4Bi3jGuY_od1ozIE z0_F{H*`dKCvmo5@;y&?~8|_W9w}dXPQ!waSstvZ!t=k{ zM%}cYzSP}!l*w7A;S0W<)tn*Y7ayw$0v?cA76vVtg&nMTbkKv@8g!fk_3^n;UVp1QPF`8{8=@H5j_ 
z-*)oyDzF$xtRRw=B6~J&CIJDzeCx3=p2{^su_(CQ>CVpEOdo+un zs1$t=m=)h`k%3rXkRx1C`4kJ<`QxXzKB#jz8DP&s;PkV$bVdRn!o`uJK?)(7;pUjz zOhP;Q*Kl*C249H!E*o&&!}H8R>C6KZc$Jj0-`6lj4D$ms6ZOqT4>sewGlNY+7dac!kUOsj{SFmS}#nWXOFc6Imao>YuaIwILQQr)yja7$G32L?C589`L;Q_+7Z=4Rv1OrV38KeirV253AMP=B;k;V^=*EmsRwVpwT zv8`Vb)S^odYmd$aEf8ZH-mww}w1hoNGCl4?8r3!WQgBC1(CxA`&lxh|{Vb6Q=P(-4 zO;~9~pQRIU%>Fd}6b@$yt!{gXuhH!8*0F&G0Q{ZIm=n$|mU)r6k?HDdDoettv=%#h zVI*Res%bICYM>F9=3>=2Y!Hr!8aZZ=`NP{zv0h9$-UUQ82@XRw?f25V^!$UjQgx3f zwFo5mQ09w7VTPW;at^00&u036c4ML_pp7N-Og{V3AOuEZMj=qB9A?7!!hc`eaZ}G& zdAS+mYh8|Cmqic2c?ayg?9~3fU5BsB5QpC~QkuvNf>H*dpp(*Es5KZ>F;I1kA*m=L zni`~4T`EAXFoTc~Mu|Zbp@w6+!<;l-0TW$3Sv$8$Z`vShY|>qRMQH;C$FEw+rF1MEo z!lKa}HE)k6MclH8CO|R~DcG9o=4K-w%R=F8vXKbu1=BS~8=8+|hU@9oWS>*ECZm`$ z_I0g2Ty*zDA>~X1gBd#Rsp&-T0z*f5dAcv5pQlKnVbq)+b_2U`0Ije_msh3uWJIMQ zAobiARBE&pOD@z0i4OVOVwgovvm!EZ&rg=Ri(sJ%9Gqu5+SRrZle$pYzSbQ2|8uVX zCGL*{(+||g7G7vRY&=8a6NmT^hi>Lk24q;gdTeL()lpY6!vRJs<}*;$u{IHB2<1OW ze}^8P{&(M-zXv($qK6vsKPk(HJIVbX+S8d--j<%TF1aP~N$pBGZeLw%huzU5UPRh&U}1mHjKPbL)ojn^@=@X2QVOQ3H^GfuRH6 zEDtacK@Qp3_5VEDbYUEfrf7e~4D;{E8S|%m#&+He@*F4SB3n@Y#%{pgcnq)(c-9q# zWPrLr?!XMpt-NH<`{pvx>lUshBZseCq#M-+nRt_^SL|)cIUeFyiW1w)^qpgE9*X#M{Var>#PdfSI>LF=Q|VXW8VcsJjU0T-SJ40H}}gh zByR17Va3g6_M#;wtv9DzeR$~Js#4nnCB;)O4_ebDD3k0cWuvO znHemnL_|uJ!qCN~ghrN~c7eC(hKc2&j(+)s)NRCyXDQ#8(8bu#Pja4z49u%NGcd%< z3n~W48dJ|3SAU1;r1oF@SL^ruFdV=5Ly9Tm0hxo8wtans5hch*h>;+*7`f=t@M4@% z?AgtOn^n$n$lSi+uUfsI`Qgis5ZXYW2;TdYQ?F(ikt7qBYIInFl{)QTrT!NBbg(_K zr2EeYcPGUDz~7i>Zn@;(Jq{397ra68B|ePaM_yU;x>9iN`X=Xgb~A{Zy;ys1TLGd-3kuyf$m^Nx(X&zxN9wz| z2CS8iS*9$2hU-7P0VGgViw(0dYENKCmP-KI)8wNCG$r*tI`TfdjzyaueY~sliT`9qoke;1J9m$xr4XQVWlEa=!hRW4;?6R$>UBFKQ7W} z4WueNbT2fNIp6o3db**>-)z0UnX)>j?#)A=9gW%h(pzApDiIC&_6tWzFd1v07UFbOIB@P2_Ngu3S7*nz(l+Q#N7h1XA@t;_7VSc zD0lk||D1iicr`>AYgJ%sTS^FjnvpALM0Yn{;lXDLr{e;_d;ieCQ7*AdU{YO(I6tE zSuz?!*!>WO(2*1YV|!_PYbY^?aw(_))D3OFzJS>EtQmGUpo{|G%>jkn)s$XCE;5xa z#x--njEEoPoOqu=qs2({Rj0Q8usVwRmP8gEuKEU)XdT-_^_QvIpL6g9eum@kKZe6F 
z(H3B*tY*v%lXPryT_`DzKpK-y3l8R?KiYrkCUn8r+$io0rN~VPK;#_SSl;m-V09=` zQ*hx$BS7$mBo}QV3-TZsL;f!I=#}8>zbxCf`2A9XanH%KXoSKfn-%$7Wzt}iW0hR1 zg$Z4^ouuBN=?EP?m@|Q0k|DXo63=Q z$~!V!&%_1QIV#S`43a{Qq@+p`mXM&JOoMtXEbT)ccJS@&dN8NRj4nCAOo~l8lq&}V z85AZCu|pZ-CNcuKOWLw$He!nrk_Ipdj5>mvg#v&LLKLg1ave(UpfTjZ&I!ixMk5e} zZy|}olUPL*SqF}ev@xO=yw`0ASP3At)=cL`W)peA#gJQ2Ou)!Bz6(=~R7Fx#R8dG# zLRjJRg#RD-ZR~l*SQJC>d%LBU#$@$%G6-$2%*l#Kj0FZGw+CHjD_cq^mhwgiCfM)f zhw8}`C>Nw;=e~4-+yVF&{@4RV1r#UKr2%C7zUs>A*gCi&mqt`rq=$!c)|Dw~p-Kv^ zA53)^6j4J#QIH2r!SnV4q81=Z0*XouQ$NleUCUm5aeMk~%(5Jl90J2aMZ~qlsr!i3 zo#n_VQ=#7pA%i4`vGr(TiS4$}ZXw1zL#&+%pbCDMSb_!A_H=YtMCm-DI`!r^aMB&2 zql*a|8XZ2p2NwiCHsgnLyP-ryQBFXeI&kC(Hzecx(6f!|1u7RPG$Z**)USL`5NK{E z$=OlD2<(l3uo5Vkacu=SJd20M4%J2%Xi1qVdPCR>F!$ONv{6wMRt+8CXlP0b`bm&S zOgaRBo|fdbK{Y)esT4&7*)75Kjp;2b`2SFEZqouo^NPgCf$%X}0NypdaMOUknZ(AR)p@%CPNZG;%>e zRzOinxJ}A|sS=ebMw&vJ5`$Bc6cI?G!9eV5n<}1$So9XuwpgYk(^y9@RVO?w*K&!m z{hVdq+vx;QNK`6ZiU?(u2GE+gc)AL#b#PO|Qb$9|HX~+-C)F>K|J|sg#vNo9mBwUTa z7V!WzD0PrO-IxxfIfXfZbsJ;J? z=`Q5v6nKq51y>O2MAV5$ZV)#i%qZ~=xG*wMqajLCk!d-Bc$Ea)AUlKuP&yzDNs#JZ zAxDS+)C;K{$~h=G1F)_+G$#~T8E65I86b5b-r3Y3qL3CNxiEz(15(qe10e%wEF~jI zIRG3$89I^FwB(DqHj@Cfs1=i}1D`r%l%NVpCs0SjMSwZYb#Uva2BA@a=P)<-s5$O% z@Y#dik%=DAYzcBHYMa`X1EsP8f`|LN^!O4tdy=kPkjaTmEwNV8pH6$`6BCMX%aFE) zqq=vGcds6j#qQwhRYmEtItA$PW!< zb?@K^TX#IbP>aLrJ)!oQj!a_0ip=?06k|3UgeN|lF5O0lCv6i@`w69hyMNJPm5l+i%UQA&}Jr`<`@(Bg;5c?LDiOOMUW>Y=W9n_$?TSjd! 
zp$ooG3Cs=9Q1F!^kUP?>?YU+q8P_0w=qP+Znt)J5+HnaXe=xDBL+-Gd3FPo#T#JF& z%MJN462Q~hlN3=!7>*EVpo%^-3o$gb6fO(g@_gCzRJ!MGn4Ker4Zwdw(0q^yNfln*ItRQtd>7K!~C16W;*nKyDgb zk;qyp6C@6@+% z85ZW^!d}MJiy^CEf8AmB_vq0Vul9aKvN;7gIwO<&na90CcTz8Pw=Xsw)TpAIobx-L z)M*}@!45l}=UwhSfm6?LKsDKg5p+8vktb=K9o^bcMZ%~~ghb|Y@)D@*s%bKYu;Es$ ziaGD+-hGg!nb59TB`+P_3U@uk)VMlnoRhS4Xj$lm%DcBx4;CTHY}@*m^k*Wlms_lW_BOf_s2fG_Fxe)fiOJB`&0CX-JHkE^O46 z?n<_$w>&`c)el=$gf4< z3KbKlEY4jZi6WLNNiiWwCo#DMo=$XxsCU};v{pDou0;-s1ze1p=2X@~5#fabNV~hP zIOmeL(BsbwVnFU<|GUROeZv#JvWnfVXRNkNW2KQuWv0+g+d=iF3cRX{J8of_d5}D~ zyx65h{v6E2ygSoONU$ny?=s?T&Q#r)>SGQi=55iBZ92eD*IbeKu>E^HanW4nDVDie zIb7Y=rkz<88=mD9qC267w?lL>H%l_N*8}b!Ob^_^r|>(6qx)BbPGBL@1ZY4ik!htO z2$X0FYJ_PJplK1IN(veZ8u^4WRHX`N4T&a3U2VSZtqH%h7m|mX#x*e*D7rCF0c^rfy7uW1)i8& zcgP4APgYa~1Z}u71y8pG?rIaOh#CY$-jvBWLotWlAa*bW63zR=km#S{klltMDW{ln zi72W!IJJL0W=fidv^2sdYaENQ=IHaM1ns#w*!*_$h1oz=IE0dNjhR73%m z1ZQQ~T%>@Yq*$^P9wa-<$AnO-<;o_uS7pG7xw;J0fp&r%rp8p?AQ}!$>bhP6Xr#kNo}OkBgq|aJm-*Y z$2Ot0LYnGDM=wyJ$4YioM1i|ZfoWK)_dP>G0Xq@bW_N(yNLhw(imH5$3Qcmt;&y|^C&25gfj(x_re ze)wUml>)ld#Se&C4p8P8oMYlGt9tG{>C1@@P1nZHy^Y9xN#=9p?#64G1DOdlr~K}12`avnLuk=rZHk2_`E!8gd9b`K{z6O_QX$}SNJ1Ds=>9}gC)?qxNr z5Q~Uuq~b-$S^|nmDq^6ek_d(hii(n=S1O{Tlob&JNOj$b*`ec;&Fnyjk#4X^2ILXK zBb88ljyc$<)oylEaTe#1R3xe$q`@B_A(txP`9}LnWvR$X)G7isiM9g3#D*X#0OXyy z*ipyFc?A*6jweV-St8AhE@6geDO?Gu0cG1a4i`AUn#dw30}xPR7?_yW0>DrO!vkJ3 z7fA{rRDs4U6quPyBv>X{>4A%@T29e>U>J~GwZ05B(+HkRptLX&1%u~(>Gs2u;wr3wlnbz+LTt^> z9*%d&|XB^_?6&GO>2&tA6 zmVgRw@yP*)8D!&7;uz#PrdGDYMHNiTK+r^v8ska0-d<%jf`E}m0>Nagi_6?OlwM)x z7NP_+NL#QqGzB!%mtSr`tJI*w7xvd{MKBVqKMLd&rkDCY7*>QM&4`d`c?YEH1 z5$jGDWQ8AaZWC?7Y7+U;x5Qnvi?EEY?UQ$M7~&je8)O~KlbMc5ZPbNjW^tBal($kU zh~o#5)!msuTy6(^p;v9n>2`P`z#lLiHMj4KbTZ;T;{3`2WL?UYcHJlu!&iYF)He05r(aBJmH8mDBM zV5F!O1%knOLa>aqlq*5FOp>U3=?;_o_K&%Dx}GAKfm7i>F2oiuKU~U{V-RIYmmrQ& zGn}05A}Sn{B;n!=9K0{?Y8+CyHwo&`K=17L8Xyxfb7O~Q!q@GHr+vM#q=sL5t6 z0&jJ>1p||xulAp#IvEb~efgP6l>v%YimEFHDoG%Y=0s6?%P6!$D*Zr}6wY#sxh6>6 
zPFDnq6P2BQ(L2l{qNp+`Mql||A6VJ*NmeF}8FJBOOsF{#L|QFahi4*vp#jJa%5+&) z?&0b)fkam~Ta(OyqU?fmWnJdaF~W?4krmHDE0k5_UHYRdlv1JHG$G6=h^91A6#TX` z8pTf9uNZ1&I}JLL|z`A>mZed-g)5ly8@?CFLZIk z+i@Mv{axt_eCwJhO){XMe!lqkswDH<;N<&gR|w=$70L)C%|#ScM8i!@O;HnBXEZKi z6;IZeY>^Kc%yhOt<4cbwHcH^YSgN7UO>(Jwn1G10gU5265H66b(dX0Tm+|hIMP!v4 z%WNKV5mqlOtR|9ffxo?>%pTrx6}9Wy43zO~!m6Q)B&D_vVE8^$;?q!)l%QFGnR67( zrEl3>DTHtfg*VOnvqz26_si5c29unTKv;^X42)W&h^nd@MyRBuriOxssY0nLVw#$Y zrJ|@}2$~9M8cIq^X(}OziD;pqq?(E(iUKH#WQLTgN=S-Qs3=JyW|~&0rjj9|s%Dgk zA{dH7Dk!C5S|%D|3ZjCFX$iV@z%p|)anG;W{r7XYkDQGH9S5u1-;)46B{_un!~#K9f5-3i@;dKT} zNaPfdW)uUEBoMEpLSh{ag#8ft#uXz|gu@Jz3DoMDa*`EoI8Zc@g69r|oQ^+f>!%O| zQA`nzbco_iS;S2%q|kum7dhuK0mF<*mlLk4+SRnV6-+K3c%2>K)H+~^19b@G(#2@8 z1s7sMQ558r+fFp8j471_PP>bswsV100nad*L0)G#&U1%u7SKtF=W=i_I#nkeJ1Lap z&k$WoI4aTv-2kD=;dWdq!fiO@ESXKzWfa`lCD7jP(;UPYl7)?T?v@rrK^pH8cGb5<=Obr?%PkNCwK>7i$5bDHJC@2d7 zNQngw)~gj(vqvX0)(VdF-Yqf-u#{0K5d*JQ{D1t`zze4*t3;1Y zzEHV#vT+y;koS~wdIva(ZMA^hm>d8Qegj>m&a+o(KlMNd2LE{+}`)_oz7{(Zhj!%*HZY5BYgJ031^Lm+_%- z0-Y&x-Wkk%I`-cUnkRd4uMg|ZHsk?F(@5xw75Ee?J(IO)3wn4K@`Ip zQW1zcgVU=HCWcySj1Ru%$~wOxGaIbLa8hENp*nwtGBFK&(IIYe7dyAxt^b8|(^Ge5K|%fcaLoVR~NDKfBAYTmU#;Q{6{3n=B} zP}>bHYyJ9XQj!S<>vVU%m1TW(gjF-rK^~u$H9&H@xulUBFAp<Rk28Y{J7&$`z;MY`AMy&WPpwD(o6JOIz%4i z6j%yc+BkzA!61Y&5Adg!$t5w&bNecCv2u)2YSPr@rXeJX4%41^ok^l6t$4xYheQv% z+V{=;`*xnTjZVY0!Mnz|td&qT#zz|s&S5#5k;>)0>AQ@!_V<8(cb|gS;^5u$1s*TR z+SMA?^}4JxslsNS&$lXdh3#$!-g}ef`W;GkIe8yS9^RULSzf;fA0y^4BEVIPJ~n~R ziuw9VLO$LHKWKeZ)2|-|<6HRA`1|bR!Z6zSh0!viEWgwzc30bf#OyiNe=;ILtssSw zsItR3V)*rGJy|totzKIn9(O~=Xo2So)Wc7vTsp3J*&tw)v>O^S6j4N!VQo1w*-co5 z|0%6%v_vX4(Ky)tXo2a|o;s1I87cuG#Hy;A4RNqYAf%zBY;*f}wYQPIk09~_F@v+s z4c&KYL$Yfiw*dztouBCca|A&&84QDYa=!^QJ&|K;Ll8@W_l>{4PRNwX<|*SXzLQ2ws@M$%$pSW1aNL-k7kz-1#yCH+_R zGG=K#m;yjhho%oHi?hPz_OBfz0mZ@jZye=)c8Q`pw?ETznEy!_3l*SEt{$4soy`zM zn|8vkj^VGG9JydOP{@e-Q+N==Fwn4=We|@ns*CZNx08kp%%&sq%fIw0o|jfp8?;`(BF&qg2WUSPM7a#fg$q&vFZx`3&8Md)#pAEzPt1T6p=HQ%6WR-yTZG+=2T3Y$CC5ax$t5l@vo4%gC2)RR#urWOIFYJl_=+cd+6C 
z|86AqpBY#O^hpgsUK7qxlZ1-^P%2D749hl2sY9e8(pQb*I7UH|l}bh7C~+gw;uDBe z;tZ8Il<6zKr`mpGIkSC`D>phAX~d8gKz-6eRw*P{0HF_%;srU!j$M{bLI#>yxDX%< zf&(<4=lsvAwWw!|4X$lv&Y*Lk9tFC3SQa28#88U?>Pd~PKJ!gs2&pnKgn%AFijov ztpt_nh54yIXf5`0CL@#)tT;XA!y?!>EH_8r82)X)-|_q($$tTsM*xYLz1IsonK{+gn*1!B^SGdwl)m?y=HKL28Bfn)6wWXk$U@ z{a#{C((zQd66}g+(=|E>FEc_lgij0JitiY{V=2ScVn~s83feksHz{H~#+cuQhc}6? zq4|C{Z*}4r?kxT9z4f8I1k`t$zQtBoF(UaP-kNW{ltzSU>paSCYi8tkvCQK{np^q_ zI>!c`y=EJ0zr0RSYLmVm#Nfn#6I1qp;Z_0Xo|inm}?NPiY=MW@O(Gc-Y(V7f77Ln-k!jh#50!dnfbIQ zLliPcaqTy|>zFuSDH3xDa)X_b&1J849`O#wS{ZFf{NLtbyz>{ z=MZa|;TXzW>pakH=(SryEUI)a9gRTqs>H-0cl64xdsyz=VHE1OPkgk-QjAA5p-<7FiI=b(Ya)!>@+&fT@V zo*wYh!m2j@*AB7rXzHsSVt}LORW6f>2ax8ETNoO?ZqM@kZM2ci>G3l34y47}UYh?p zp%;!*HV%i#)aHT-^powcr?zSApDYm@no`(3<*j?^g7>(@(97&zc7qL-(M^S%)#j#; zO++xd3X(wBWPrwu!Ww0XK;>bO2N#zERJe3Y<+v(XA_c=k5;L39!~wRw@2B~MaaLYY zP~EY0@Z+5~qs%uScOCkEdYo%5Zz&Rd=E=Ijm`+IxERK{lwrnOeJu1|1-eJ+)EJjCw zs>fv~cE@<8o>k|R6=0!7G}}`SwVKQ4 zF4JG$7`Fu0xbW|ucDHM(>l@7bp>d-4dTl}?44sft8vZu6v0>GQBA`IG18RnjP~I^6 ze^LQBJ(!8w<6^a!f=l8S-Sh2+T5+d}IwvSW6n;u2(lVG3nOY%iTqtsZElHJcks|RS zZOjR8O@nJ(^kCR*is@~(Q%`NbJdK_!fmvEQHQKS|_($Y^9OR1_-%t~+~;4BG+=>~b0^5qs1a#o zBWuIMUg}HKo|=taK`J)=8)5G2+3qAY@pm88dkypV*Y9D&U!p*ScjbrRQ3B8hXY9${ z`0l=)Z1&y=a%<;EaBmY1EKO@W>rl;7Ysy?=ZbOECKp#zYeM8%%)h|9Cr)lbbTN+?? 
z{2xljy|BL*+Y;9wt>=(ftD%8k+-?N0#xVsU+v!d;e3~Ms?ps=pU(wEK%{Bc$;bmcu zVbC9pnh6Sv6ckYQhxp+|SfVDJkA6_Z*a^f^w24BLflEMY$zC>WdaJ)r6hi}~40!8{ zsCoYFhgoWh5|)?3GA5-y@|hiBKDLOjB!Ibchw@<_VMm~p`D;)}psJ3<@2=d63P=R` zo$)1Ph^VF{D?T)AEXIsK|{h$LzPi5(j58?wqU z_el(amKh3LMFkNCACV_@he`c5A6Oy?hPcZEFSUlJ6et=%rJ$w81i8CoDH(@p11Q$h zhnNZ=f$aX!=m?T3N(dl6r>cK^DFCAv4`NBi>)|B+=pvE|wwrxhO&i-v6|_7}jxB zR~fyz%dw-0A%LIMy@}^u3;A3igU{5jPPMBaB&KG<4AlQx2-AUuw-;z0Q}210Wvox{=-Vm)1=uBOqx!vJSdFudC?ouJ~=1aXX?2PhMFMrUv-(5?IFY` zob`?hr#6cxuJjHNl^|k5X*7Y+ffVBA+!`c!gyN@gzH)Wxk69mR1BfB5e(MwM?KjeK z;IkhG$<^lvr`jJ6f6SNrkR^5S1P4SiJ(<^Rdc+a%2mVf%Bcjy?bHp)KA<_D;*40qo zfV8M1AE2JY-@Kdh#Bq zL`0^t7noRf1q$8M-``%llmpq@RurQhl&PMXqvqJ^Z$O4%M+X^kPdYwI3ysE6ghI~-|Y3^nmX z*~FX6_04nSf4Y>)BxhY7gH$jtv_PL^;^sTbPTg zZJTgxI96jn#Zb|e4wJWem(08v?c6!kGyJK%?QfmuADTRw{?vw^I`p){xpPCiI@|nhI}S!DEV27$&)&#?%*bfyR5$d?i>C8Ej`>~?$?pnWO}jW?)bz9-B-reK zR79STK4`XnCy@E(m7&zXjX!Y(;uKzQ$QEw!^W*Uk$x&CRl>@6-FXjRGvH&1z(_07V z2!8kYb4e$Sn=v$PLv&&rCNP;TF07N2Xn}-QqXFGbIY3i#04bLO;!QZ51g=RV&)^f% z&B_F`BKFiMo33$!X8VA-Zxa-1^TN}A50mgYIl!900wV09GP z%Sf}WOG+F`o{RAo`k;;O(KlD0x=zzYOH8)QL^DQhOKVdza#*kr80KBj%38TwZZzf`=Q%o>s)+8` zwSq9Ld0~h{h;A);c(u$W%O!Tzz0FDpm%2}W5aM9OLxN{f+Ebpp;5#E}S&Qe{?g5AyP;h4@)=5=Z~Ft9`4?Bf|M_JdCwXqx`O6lu{^oF zOP&~Uw+?CJD}^?=^@KYx1H55b<6Cpu2UGNX{t*)|B)h2uDORC}FY9bliY!yq5aDuY z@knyO%|OyamJ{rr%{0g?i+Qbb(JJ)w6~aP}UL zqzXy&z)|iUAf#GCi>4vPZCpCB2J16R%z#npXO;m$l4P`QO;n8`KnX|%6m`tT4kuMP z$f$TV=V^!GO`})aLB{PIBHgAehY)6wqD#jQ!CQ*Q$L8zYX*sKL>(u&XMCl`Lf+W7< zcoH(h)Ea1kq97mfqZA>zI$F4a>25fIGD>-cN<&FQ^Y6-)>-kQ)nZ)j%-DKh0F)LX*%EG6cxDPpQ8YBT!F@`f75#6R03)YyFO=_&qGgXQK z*7L&)9$E>QOMP@kxZcIQsV_<4yC-knz@MDZ?)9d}%Q{ zz*XxB$B6@pIwmGCPFbujbCh(vvxyGvqr{#dYZNZ0zl$_sB9!l~kS3Mf;wxki<_OpZ2U zP20hM9(GX9SZ4yr+XU@7&IdFWn@iea85nf~k8O;v=Z5V<6s-prH(HHHNOdX9WT#Lo z6p9NZILIAZ(;2Ta#d)kE03|g*rA`G%TDXNx+YKp@$xWiFcMQlLGZaBd+iFc6-fxZF z=53kODI$oRXEOQQXIgRN!qIWdNJEI_^r?)+X4!o{FCQX$^T(vzr!6s%#!8YdGNi^{ z*{ht*Oiat1%;N(%-g3i(7a3RwIbp_ap{RA7?Kzo&G0dXQ{2|mc%2@}DFy0>Df^T=X2Be6zL~-O!0j5RbaepVw 
z1=d|$F+Zekoe2)}_zB7dYKgJH@4Y&Tc!Fq3h$Cs~(RlHEYM^^{c^5&yGrSQKmcpxH z(VnY?<(+g`Sab#)zI-z1;aSdDZLbGIZt)*|eA+VclBNUk>RcD)lJZXsGn0RSL)75t zf-`o-+TkZ2B=o&}$MMUN6;abOgag{=FkF?VCWd&PbEgU2+})nZP_Kq`+s0`ahFrSq zl~bpwl+9?-AY?({2xwq`78FLZhg?P@#Gcg;Xrc*RkiyP7pN|WUBF$#5Q7wj{=s@u_ zRv*EX$v{)|LRkMzD&(UlQNpOFwuv0Sl4QU=JW%z8n5+dLipLrr^#4Rml!aujrnnaI z3{1KpNyPnj$;gD|2mx@Of_73&66bA%5$74oQ2CdlB(sd|kPnF>2b1FfnVhgN#~jY{ zW~5buG2>*$Njo(lpu6L=h>9t?u%2gCEx9L$!uMSc-AZ$1-mHU|h~_vBvf-_9FtM2K zZkJpsft3ic-NdT2+`eW9-{8WYb-W~?T&u+`yCefMVM0wG>-$u^{u!PMo~jImHW}?J z8eYtpz5*cH2wp$9nY_6x?Pddp1~08tv7qz!Q!{?@SBai#DF}d%5La}WFrqrOYZ$OXAzGIm5=n7@%@+b?kBBafYOWR%!nJCw%RRXx zzOGmM0y{|FT@7W4ec)_wNqz+dqE}}Z3acRqhSuGDx}+HkswMq6P^%mlVaB^ybVY8-i`hJ^z*v+j_k$#aPWaRzb9&*SFf=XTxO+4i>&1N12?se2Kixc2*Tx~nw7DX<7ZpbA6NLPq<=jW9zNh4rUcPlGcoTR=jhVKmdNe8$SmyY1PQZb2htrqm=MJl;$(kV? zj6$6A*W|C@3Kh^nRn=8zBNgsem*|^vD7tgqf3PlkbofWgrYpJu*!CB{kGDP<$<1`5 z_D<*1tS@35z%|>xPKmlnkPW{easGtGh1iI65ylcZAYhZ`q;T8ujb`_OeRZ93teI-% zevfeubhzgR?RNPE&b-5wy;1t~D7T->`8$N0evG}dmg zR=IOyP+j8=0D?c20tq6|!ZiqZ%$$<>`g8QAiR!m(>-MS#;F3)pSO`H?$A>h%t~KUt`lNOYGys<2=7iGqEao@-u;9KnXB z>@nLx5v{4z!kseI1(O*$p-6Q=5j`W1whrZ_7L9K;W@D8ur1v_$#!no}gGS}p&|#7T;;p9j*r=;3o36}!M#1EGJ4*a(2^LDx=jh(9hszG} zk2T`Qc3JE3^|&_R`u5BT1V}{)`nExUb)8L3=N|^ILkp`tdC(y~Xe1LKcTI$V1(d zP6soDvi{kW&5T=SWTmyEvu2vRf2)e)1vC<+h?kTz0Xc3K^EZUq&9;MKS#pOab1xIF zCoR&Z+GL}-GZi@0!D(Ft5m$2xA~)FYPa)j|MGo0Z}lyEhbQA|u8cGH?< zSfv=4p4g6F_d@#iIMvpU-bKT*C#gr>2!~$Xfr}hWQtldSVTN(aBLre3McQ}C1F7g( zUymAlkWnn6Sp_jzP7a&4b1pL#Qv@q8X5o4TQ;}4`B+fWg3i9);n}j7)$at)ck+O?223#q=JnlxQ8j+&3faKq6 zc>;W{nUe%KDnv6ajzNAt6U;4Ciuvc3M2VSN%*f(l=6a!mQ5l^wN5nSVcVJ#kB%OC- zCw1eeitZ|t(d6U1h+gx^cb?scrG?&9W82*KhD*bUgl<0lh-C5+HUh1L|*pOBmknD@gXUT9^K4Bjlj`xdf2%bz7WLkFL z0H>dwo}uiOt@UD_@18J*V|7Pn(~Q>IPWSfjHh&63h0vOH8!^THR|zUQ%FDwlR`XQ4!fPPsesokVJvb(y7~@A$yk(FsEq_unwp9 zPD@7R$Tta@n}>0`a6BHgr(JaxAF1Jx{2rd48auu~*i`H%^`YxF>6pDSjG1{uKQ!ID86(@?`72O=GdbQ2sZ4ag#>ZC4w8=myi)TG48M9;GesA7 
zqcNfrz^C}FFr&g-F;OE#f+nV#OCuFJW{Th)}^WChO+~&B1uj)UTA7qCo<@^A5umw#iJKl(T`T0J<+?SK!&q3}?he_vP zbI^HD6?{ErQ0HXzuve^rAX;n{tN_)mUa zNKA6@!2w^#UzgDoGSb^%fz;KrEhwx@Xxdxc$=|na73-P7%4=_j{jM=rg?zda%gCP* zxw|ptVmFtmD$y{T1bA+2-9xMISvjkYn<2=V>W?>3rz}GvZix)Yg26HEIF{4M->u#u zIQ%l|%S)n*c2ngxPqU(Xp3xAPkC4n-v&MHV?rS;4#mf+l#Tc8aYDXBn&Ip_10yw6r zZ>`P}s`Y%9gc+`ii)T_YNMtez4ERwzndOU=U%hjoodchF!hIVZ%#{yfmF%Y$c*f2i zx3UiB$6Zcz)V1-AmwH2X$-N$Rh-7Esp`uy4`7Zjsp9q7iGeGjj&aC#N9Au09>f^@H zO6`mz=LTr6Z*|px=;-q8LEYIHU@8_fv$?bX}vQ(ddO==MD+s@&AUw!T$Es_R?)7l?$yl5DN%A7IQb3zFO4r2{?XKI98m_+7M^@&&2#qk*PYDnS; zgGo_*hr?ew&M07#v#h|~zansobOH?wxnRIxVQ2wH;qsOoAUf56>P6F`P|x6vEL#9LGH8JmI|Qo$lkXB-kXj zqM*aaZB)y7bSR-@NvnelG_moZ?$HdOTL{*z2EvTMP}#~gRw0m35xV)_H*!kl7{hO@?4nMRjIl2D=;)=v)Hd(uvyHXcS62YV#82*fx%<47lx!TNb!fzN!dcf`6H zzPE65XvyW>P9SnzX5vxmQ28|7i1UhHz^V4P-1dk$G9Bwrgk$cgJb-O8Gh7?8$@bqj}?Cld`8 zmiN|fKKVd>(C+Rc^hFtE^#bBgHc@A}xIue3P~nkfS9v!5IKjvZAh-!VG3w+A07Pqk za>j!h2x9qgX;@ z&JTXnFD|Tj%=K0moXMQ^m@_I;iCCi(j^*C;OPwv8Wqf_<&KIB zBQvf7P9=7_+$&NdZG}v+wW!rdl==z80)H4Zc*PI)}2*q=%rnTqt)92_4ILlW$-zft+@P9@qM$)JBjMiiJ_na(QYBd{sXFdCJ-2!v3Cm`oLs+2f@PG~jOv`+2vinFr8|OeA?WOgcjX9-@1Pn(2M?XVubbj|DK&ze zNL7=@ae|OyVk~VcQ*6|q-s#J^Yjr8<)~5i}XXEJt{9!2TxCs!@VyBbp4G-%Gu{HZh z6H|GKe4)*)KU?R1c6X5|ex6)r&bE1Iq`vtx7rnxvTx=>9le&N?0{_pPkPZ$WP0o=Hf}~SoG9pS1laanGM^b@Ec|f^4+8u>o4Vh(Q z0;eE#j~nYy*oL}#Qz5#L4lq zD$MB}NW&5Q8Zhr5QU`Nj;+qhZ)@e$f&6x}LqK2ZsC5-d838{TOVtzK64`CLW6OyM* z^7h}8sm^w1b1>Q)&_-cvC*Ma|F$~N^&)ULJ*$SISoV1*C7`cMFhd*jx%?pas%lc3cFgsAZ+K z2zwB@Z$_hL&a;Wv>KcCDrHAM7r1GyMqN0A(4jk$W2#A7;8BAV#3d}KC7a)u7UsTtNQI0leEOhc`)%rV3voU`r0;c_BB8`ivP zTerq&2bxuNW+$HKBV#mf*Q0-!2nn&DsS-aQ0*Brm9L_?p5 zrO+-vNv-7Z)exMMxUU>otlcr_8GC&u3Zsi^&R;OZ9nezy z%{FgMG{Lp2UputznQS=o6EUJDHl@RnnKio>rYV=kXKBZuFP$~U+jxV8$~xLT%-!L) z7GF489|m~GKeVr!v|Ezg!}oap#p3FNx0xH$uz;;V^M4Sh5EgV?1SLLs3PT@KcuR z3bM3RZHJlMIhgZl-cuNw$HcnQDksSj5LD@-liU`3!Xt-r5R1r2;lh6kFpIexO48GF zshZDh$ei%tpOliGdzc;F%LO*m%Q(WgmigJucIC`jiHJXEWklGClR>dPqQ)v(htFw*4)=Y{h;Ef-T&X>xZy*ZBV$2o3fFWLf$jbT~w 
zH9a-r2ej_pV4!x{N+9vsQ9U3-AX!6gp$j1g9WC5&Q(iSyO||Az0eQ^C)Vk}AEjKaOaVVl6F1kL}_r*KAbW(R6 z!Y3Jd?wdCz<}qYhA!|)>?u!hq9%;u-EUsLccPhg*h7RfA%4a&*p}Nb~SL2Y~D6 z&uscV9nmLnMAk}LMTJy?%9_mE=Gm_jOv+`lu;yk8ih)-fiVCP|2QtS}*GtWT1j@; z#O%6OWlQCz^Dxn&=H zyyb`1VqhYIiPoYVXn}{Bm?g)bPWPbl4GWRSb2D)|-dT4Uxiwd9UEFgUlU%lgj;o23 zMW{m+i82A1B|-^GVnOZG2S+a{kZ6JnL8e~QC8om>awl!^hmh1T@x zQ9~WJDbF(p=XjrOkEw-Nmei)#F{jelXG1lGyJlF25@Rs3oYt7u8aU0`7hZQE(URmo zZp9Q(9BpqInb#jMxvg1iY{f3pj&RQr?(y4MnFO|Og;hSuu=Tc9y`iEg#~F?+!YZDN z!;R+wI$E_JLzUj@dCGGxS}T5?$6iaJK3sC;;*n^ZFwD7@jwq_)Vsi<6xXmNih~+4y zQ`NY2#lgNXqZAbDHFBAh5eyQbmZC)kRl54aMBdWBAJJoiJz-&iP_ z0%;EXS)?7RO$@fkvKb=NyM~2=2#t{3N!{592F!P2?E%Pu^XqaAYKP&|65n^JhS6)I zl_;z7*T#3aK`|oGY{c6a5m_@Y8k9^*xvRsLBA5~gMBa`SXc#Xg=?HX2jWeZ)G6-~P zwu{FEJn4P!N4uNeX`@EkanM_fcXmv&R8W!#oTW@|@?p9JK@H`epW_qvDBX0;a>p^B?S1HRR#5UOHa3?=$Wcj9X(}mBS)4A~Q zxejEXjU?uMd2vJ$Q1-*?EH4{y3aLGN;lD5qVd{DlU`RjP=aBVz4E|elR#MqZ-KL_! zNUdyC7TL!YL>4@p=FOud< z#0=TfQJHMQ1i=ytqilxjb8c%kOu~f7>9H8aV#R`g2ZPO-hGjBba^@K`6vHtQpcxom zQnF<5Oc}8t@}O46M8(GyqM*ZyK*Rfn47fL?EqQ4 zqyrENkk$sFqb6K)w&cr!K;#WL)6A*L)v%78IFOZ-FF0O90-ew6P)&;iIBkjff%=U}=GxAbN2J;@zzcf>KmZg$$6E(r`xe8E_TrM<$A++oSJ0p|{)0b&B*qmj? z2``p-B6-_~JrlfvIOi_;&~t%Foq?gqC`Dit8k~H5kEXnv&fCrjoJpqGaU{fxH8_T$ z!I0iuvy2a7YR$xG_BK z$<7I!7tZaC@xh72?4T$v-biqBxhuO0Z6;iyu7D?HU6o{Ap~8$yy0lf3<;ZZ8=-}dn zNtGHZMak~2(NNS~+=QH*b+CNyML8nxUiTI|iw}1XLg=Dp=$!OtJI>oV#upY7y(dhl zjRf`Fmf~f(jWceh=awc{mf@56q&hnw+hTrE^}w^V=>YwlX$cK^_h%I-o04-1WFDaf z#|bmAah#h8b2fu4v#_C(FfnRz%#^F5;S0IA=^TE0+bVk3({?yFAgh-fgq(aM(I*LY z#nHGoV`bfqyO*O~9EF~7-E?)?2CawM!r^>Z0%+dsCvDS#cBS-;gCNUR6)8m%C3xmE zl#Z1NQ99bm_MG5QL{U$vh(UG~H^JePNr}sYos`l*s-hf*gv*iMWcyo5@eV}{qO6iA zx~#k3VRvLX2~(Chptnjx;cM0;vxbnIU+G+ z%{JEMzQfh#hpj64q4FY<;#`Tad-bNXLfd&{>mKfmj`VP8JB9YLpQt`00| z%D6?V%c~;}0x_ZpVlVP&#ff1b;iw3+D0DFbIp&-^OJ~!`TSL*o9RG~Ql%M zqYp(-Eo7L79fdyGC1Eitl<~2Qao%Yc>-?Gbg2t0cId&ACiQ7)AWipeoq1aPmNx zyx>xBP69)4t^!q21;P#tqK;PwI(H=Dan8WdgwRDWO)6nFVi7SKjDW)B3V}_D*n|u! 
z4iv_nAv#Iim=|p(2`uof4rDTfo1%nuEe=BDrlTYZMk*MZH3(7|MoPH~P#T;iYJqaS2AnDgpojv9gyT*| zSVEB;wN^zUXo^s^W0dX4x=qnwQ%C|jamaUDW}=j6sz9NLi5e&<3PNHbDu8kffyhTD zilwI`QtBw8gel1~XrPH=NfhX|TsTp})s$7oBFGGd1umpAK$s{(f*=B@*yNQAtAOfL zY^#*Sn2}6r#P7R3wBg93k==Kbxp!{3si1+Ac8f(e>5k>bXgQmkY1N-TW$5$LZrMFq zNxATJczEeWBJ<15mC%2mcM3Q~1XL9)WWvBeSY9~Gt;^sA8ZeLEA5WsQhdQlNs*`wiWe?@PfRX6 zfg@ap4kmOv7@i^6>f#6)@ja_U$(p%UE>ROBN>nD8{7wR6BeEXguwk?I1Gm07RZ{vIJhn(9n+i-tFO+lZW8Gk#I8~^;MD5Z3=kdO2 z$^rO&i}Fg2=vBrn6k43ub&4ac%3w$PO-a4#y+!zeXcE4efKrA^87N^6LenIjb1)0t zAax1#=iK0SJ_MeS_-7i$K`@A?YAd2khr!T#3LH>4xB?$LMn)FAP19Ta2Vutln-cucXl(2&`Aot z3y60*L01Z65c4$vu}X$t2cfo*Nf6g3p$s(o*e|La4+lx{_tYR~J@RiXtYTymk)Unl z*i50COl1jBnq$(zaI#TkJf_Nll4OCB9LgrDXiZHcs2%Zvs0M8gAY(BqVz7p?3lsyI z#Q@bpx<2E({3OpmAEw=F6uV$*G76IGGJXcv8@(zS5TC-`^7o#KsGVA76b8hn`a@_RafH)P|v+urdc#I)JBKLK@BI)iNDHe8ah^)ga@shAvdWf7vy zRbn1m9u*B(#=~@p74iP!F}Dhgb(#dyzfonAv)m&ULeBwH;M+mkqwVKi~5G{Cf<2o_fO_FNwvuu>?gJ zsE7=QwDjTN6)#u^`cxL$4^Zp>t` z>7n&Sg~>eb!yU7u>V1(x%qMHXnZR>pt4Z5n?DB)!oXCA?jyKiPp0~WO!ocyw z;DKvOeY=6=-gGz`2tfU!60}ILAFzR=4tlX+F~m5KlT%QB;)z2L+2goRh)Oufb=Oez z=dGU)A;~_F$?pno2%ck}G2EJhfjY~UY8Yy;Spkct5nK-A(* zMb9Wn#EYmYAZsC`qHo_S)S|KBvI5RpZ)MgVm{W{Z7IjNu>u|Kz2Bh&IbkLf&qhy}a z%^sa@)M=Wo8zEV%G{s(A!+_J7Z5iuyzVaV#-%L8losVcKn!w*jtng=0c)tpI->iz- zRS{7H^sLTAwUO&1XBdhU!C?t9Lz!2RA`0>1+|VAP=Xa^qq2dn zJXtF>pxC^fS2g|qY42s{$ab0v7McUP9Y+W389t+)r)vH+y6EK!6LDpJ&k;)O^k zqJ!zu@s#-+UzP37-bvjwf=}*<&2F@8-t&on;oFa-N2m(@p`j9lDOQA~3Q9zw1I|El z0LcuMDHNd$m1qowC}^Qt7LaKcl7Iq%3KXFz5|aW4APS{V?}c(`7JxrVLm&nrLWQAe z6rzHZ3RI+^C={hy8fgjzC}IUE1>Cs^&`>nD?Te`A#V|RSnKGtfhL;+Nc~r;RvRb%Yfjr9eL^_Vo#{tPD4FC!qZd2i@`~ zkWQ#{oWtw%>~=by$9hRg9WQ@Fll-G=r+OM_oQ>#E?CN}T46v8uF=!Wh&5xJZ?mk-+ z%4zlQaD6#)Py1)GJb-jPEBLc0)#12 zh$WP)i6|C}Bqp%MTw75?ys#%wggF@i>e7lTiba})s7+;cgn^V7&YTK!iBs#1E0hHd z!MR=_cFIy@rk~5FFY5>4><1yhN4yS_v`k6-vaySlQ_%z(#6fshHMb$l>fR_0aU$eL zg3+fIJGkLg&p=sl^vlQ#x#krBLJ>jrJymiZeYgEDlDzzEU2~CBoLQn=JnE^IvoJ0t zD7kDXHeXw?zq~{<>C~Zlh&7N2(m|ps2!gaQnov9lA6ZW?-TLr}`@5W?W}n<>A6Y_x 
z)Z-&UxR8f_{-j-OKQpcUJB}3Y&F*QtF&%03cqop3iBKHtah=G9A>1Z+tvj4f{x|e{ z899=P2|k&VEv{j|-s;~2JUnmdfsDwE;i|G<4s0UyaH-`cn#KsqCa97DI+PStQCx^F1<2tB!Aq~bof2SlMG|(bMk>#Y^OKOk z%*+l?m>W32o*W@d46tpUPb^os!z&KS=))i2m748)rqhZVd%G_7W)0W+>Amd_j<7`U#;WIP$@%3Jns_&}Hz=|w zXy7n?A9R(KIFLM|G=c1+>;4Ikq-B1_Vi8_{*!!O+1WKbMD>WwPtr(NjxAgcZW75#a z*3;83@LjI%FBvaR=3eF`>yfYM!|v;k5OSPrsje5f_YAx7nX8C3Ft?1 zyxOvt?e9jkY%vT7ro;^qKdt$mx6}pss@r#3F&MBa7z_c1DQFUp~@~W zJOY5B_?(Y;4_MT+0oKAP$qZtWlTdc!6MiVV4)_OtHX0PvzII5&c7W=ABtIH>fSR18 zfNO(U6encYhrv?Y-pLQyib(@+#^^nh`uZ1R_O@TGyxqCp@a%$PNZ2QE=x8M~oWHI5 zJGa;&9HWKnDGhEXrc4LI=5y2MIGhtx%jQpA%!r&3X9g?f4sQN#=d)}3Pwqd7**t;F zog{Adx4x=)n~&)pQU}@{{v&yjGaNS_b9SBVGx~2LCxTdpxL?}0JsV+#!kpWME3NUk zrqi2sDj3P;XyN0TPBc|AxpTzTE%obRk%;o%wFXWkC+;vc46w2hWOu%uHKpChdh3#L z?+r5twc6E^VG%17R4RlUYC!_`!ghws8ZkR_(v(V`a4f8ae}-2ot!B;#q3Qe|Y=rbo z920pis$9JiBj}G&R#n3P7})Q&rQ_@3r&fCC!nyNH4X;hm^h5gwRVN`F@^InkkSbQKrFmDh>DIFiVxN%ya5N6Km_x89w01q9ZOg%>60No3bJzFCd@u!*tmGbP zWVWyyY<&2`Ac7z$ch(Lada6Z{*^Z_9Z?5=AeypCr&cBz_Wvf`RBTH8IqnNX83 z+0G_GYyE}MualkL^2y>5CYlD8G|>eSRABpaJvgX(W)3LoA@br~-92;ehe&&@t^Wrc z<8!y1cT<5>*!*F^amcJcMl7*Sy+cf97*F|rO>k8CYrWg#e&1mDl z$+8`){gJo;8yfi!m!6zP_0!Dy$g|pk_8-nq+_+gSw2CSg)t+q(n$@HyW~sOINfrkD zD4vFMcBS+}r%OMIZsW3HdtZFS4YHj1vsJIO#v=t;%VQ52~?vFd#HC zpMnqUVNkS~uvru^xTvu|f2K?js0TwJ>f-jeIq#iw_Cl}{C?_eI3UxYRJc&jbcW8}T zsbxDZ9};d8zn))?55X%R>t>Bv6|{ob$Dj5n@_m1_evM)NSK^u~q_jZ*q=2}CK@oPH zIIN2{CUTgA%Uv?9;$^aC!nB(0tkuA>9-q1gn+U%;TziLpQv^^Z$ zk5kK_?JWC@^2xgtGT(Yt6-1;ZzHWITe7~3MAvHIhphQ%xNIGOamBm0?^%A7j1;tBl zTvR_lyLdD#zNT(4Q0#Yh(Nr`|5fnu?v}L6QjB*g zfc1qWL?qkw+UC#Yq(Kr(hfmiMrgARdXk3bCVuPM{+ZM;Ysd$9>-SVpZVSNjHkLP+8$BVl!m<&DA*iSYy5DeJ;wRLH`Ru z%6d5vb4|rie*jHDvcJhVm{A2E*?lR)mFH;=LQq#dS^6?92Eay=K$QHvt!U^?Mon5$ zAe1s$nBp&uGpyAUa@dyDOnIMZL%+cUNm5aj@RbVV0YwEM7wM{*MAxn+jO5h(=-#96 z0J$QA`qlH?E$fg093OimuRDLV=wDQlf1;3Tp0NqK^gzPvi|!9Z9K-4UqObNk6H$ z2guj*s0$dj%G9Vn?VUlY6rE(KhZB`YA}f$u#O9@Qr)oJ7-@OwNaC+UBFLQ-{bA8U5q&2 zv1$G4Ja6;>Jwu{;5Yzh39jd}0UR9BgPKA8Wf2EtQ<$qS2NQk1Tsg5wj`=%sQtkz)h 
zyJWaatOUx86wc|Kc+FZio-D^mpmYhb;0ya;W9{ua9+CdE7IY!m?fj{~a1V3>!fxDQ zJx(vT@cq}?)SkNz8(ojhhP^0%UjKaIoAE>4$Cec@TNPw;GmfzHmagt1FNuBk1ZNXG zr!lVWQDMW?S($vvC$Pdv&ZR&@#*avA&#zeu&H1hxlkUTbn_my<`yWkZ78XrANUGH8 z6p!`y`hVB(<=-HjCak+kBPB5k2=@Y)E4=kcsy$^OJ|WKS>$uutAT3KQU^y3HT)7i+ zWD|-SaxA$V3#SVRqKXY|H8Djo3`JE$6qTY%{j}j*!~Nv@?>)ZsPUmwBgmW%Ybmbx< zYaq(jCQGPsjFZP|{I~pg4hhfJpDxnFVIkDdH;lMbjj_*b&CGI*nTFLp`MNZYwZST}3(xUSF|aHOU`A9;g94HQ>qwBQ(iWDOg4z2CZ%B9ScRH^(kADf;_i&hcQ-j5z@jT{YF_Hj;ZW=Tw61&bxKVu;6^3ND&5Mt2 zVyUsG1)@;|U^R~v3^}U|ixxn;V+r28$(1iD+^omiQ2`DMXD)M4Mm6otDw=mRVw0)8 z%>Q=-&i4*8xjpvhWW{*Syz8gFUiS)(qeZ6N81X9XW_5Rd%+vN{yB4K6JGVPjUawTs zHN50UFPM|!WjCnuPKq~m&aPYvn3$+}b)5R)-IT1A8$(_RhRBAj`4L$oEY&j(v?X@1 zdr)K{1c8WG5>IRkmZ!AT6bv0sTNKY)bhx$F33Ju3ITtE4MCm*Mm0 zjJo(~(CwV3Jk$^+ z2%RAohH{b};D2A`Bl4W#Pqpgdugc`d`8duR!lgFJsW7w)!orDrb@Tb&r;iZE<%FGD zPDaxF)$Wew^9}jO=ZoJPjB#_0YdvP-)lNmHoI3JDc9P`^xoRAtu)wIwG)Q@2&o|Eb zI&)raK*MgS0@FKo7_&oq(RZ$PlLaRD3oFpP@!V=VC zg0p*4uVf%WtkR_akoFA+7HMq+GZiL=;?cq+7?`t6hzKNWtAe_TUKGz>UBI}-G~LO~ zubG&f?agT8(TZbhl+RSh8Q-`R^O|Mle9pak^FsusG~tcoO|OY&e2}P~+hDlto2= zp)|_F5ljaYXR|!AZ>tV66&$4|gL)9*npPV14`cUPRt|#RkSUq2kdAWX5{ODcx#`&=9iVoDBrVu@QspWVn!K{(xlMIC9 zRR2^i_VmYp0xa!xF6zgYR|-o59>+Zb;_oIMgGK!3Bq*v+i_qrD8PdVgp(2{8GayM; zWC>s?FlYvurYDXiXB!j+9A>kG+YbA}!;?AmL4TmlE6qw~NO;XO6!8Xmh8c$Zc>}9D z4%5EusIYlTg%7us`#EUeoSi@%0D<2qAto3;!NWm9J$MG_^gIkf+5mQEAOC;if40-F zXdNy7)|tsezjQ=#B!F}}-whC0B&Gb6dL*ixPNc&7{UGY{7A6gqw+3bhK=6b+Fa-(o zW$h21*tvb;fhp{~ob}(Ec}Eb)=Y~AP%k;w;4@rq8Anm$bMJ6YVG&aeoA3lY%7`Yf# zN!QNyVhBHJJCfev6oSUJIMd%pqE%vn_x&NGCI+%G$!t{om+Z|q=b(OHq%oL{u}6G! 
z2ZQMe(~$7v!1VN<>iMM-bmi$iwsp_Nd9w|?V?R2WkxAkB$WW+rJtBgfbg;-X)1*!g zSg<_34wTsqnQ*8wM(HQd?}5jVq3H-7GX6xoO0QE7m%X5DV3Wm22fv-e(f9me!_vO9 z-gX2h@PUEKp`ed}lu{!#ZTkf>rif(C;IK}NSG|?XVOV-AFm&-EsknyO1r(?S@IoHF zxG79)EhxCbdLnonp>y1I+;Vj#BrV)oRTailEnzgI z(HBqEhghe?3c^wpc|K=dZhTZA{Q96_xqf6jYZt?*E*g}iXeuUvpouBmzgRUVu?Ab` z5Y#6SADKFS>|)rsCNSVBs7T4v;$tpGGrLJYZ7fS0O1Z#S7T9N^*&~_8i^=*&Iy5>QjOS- z7lbTIwW%A-%$laQoIlFUFrt$OsT&E&z*(z>1VvGgTZRErQI%~PNnnv*&Pq+Q`&<^f(TE|Fc~c;b6>LUjv=y&dX3o^iv`uQD%MQBXuD zaY4zHdfm{MamISY_go0&D6rHclv>gVIYCT0QzNpjP*oG$4caXZg!75s^>w&JMNnNh zvb^NtD2_lbHINt7Lv(A!Iq^T{SW zv~E{%J;2`c%j4N^cIP9ql@r|PIpnEP6;*Mr-Q1-T*yFsHnD<@RH=CE!%%eg$IUr7| z)?W3_Q=^lz_@yfmmu>|a^3sB%&hzga^SYDHnIY&UQ`HZ{UPUbOT*&qn zGKn#lZ#XVV&Ky~{WyrpJ&^5BNB96)j4wcC6Kw`%t?qsG}4#MR}4jhW-DrNC=j->W` zl*QU^^W{e(swxO5o_LFuM=B~JBJvWZW24?XACxC%3Ff<{WB*PGlxduR2_yiY7pM1J02X zl~c9R-JWQ-4P1hq&j%7usZMp32xLx~r1P?#ctLa0nMJy7DCcge_eN8_c@Hm$nFz$s*yUEL0trC%BJwgZvzO<0HOx z!-Q~A4!fljkaL9$xlUyVW?@QOoyyb?LOIasWPx0WiYSs0s>cFQSj_YeV-eN|%Bdz6pzo{!Qj}6gw)3IKs|TI_<9uwOxep|c(?2V?g+isBNKvSFAZk-knI#;-q6egxzV;Uc zCOOllNyS_0Y2`xid&;Jxfh(LAvl~gZdYjq@)>+5I|?2; zUAZI}g)zjH4RwQ=kf>3GNv`ggMHdY&JAwy0)+Jmj;O9trKdTWntSdN&6O7};Ic@_F z^Sg%=iS%;io}lxyL0>lQ9&IN2?dO+r97Tq`%4!(pTR7qZ*kyv7;u>(oxj^!AqI~Z2 z=wj=2V=zT7Wz10oRf;OZAM~EII*Sd`X{_CWv%mMhym`0o=3S0V&wb~#MgK|$LDzbB zAL9pz{c#|=cl#l#;!^@n*?g!^4U6Q#(kQMLnP(a!hcwJgv0L7KaYLF z9Kt&E$wUy_i&G6-{q`K8x-Wec?dd1cD-o0Xvcm+65(Jw8&_li3*KMFQ02j%&UzMg1 zJA+^dj|r#_#v$xmp=8+dwY{}}S?!fBBb@RQPWv9CbU-q``e{7l#K*q)q1Jz0mi zZtUNFxvsSR>j?sMCb>~SA^!w-$(8%2I8b7bk#ol{Q4oR0t=r(zVs3eFt8ZqklK zt4&Q=4o~3ApS1hrJ}{q3w$B(ALuP#DL=Va9pj5EeT;CKvS#U}jg`ZXuynR{Y)bjg^ z#v-Or_JYYMI-8pm8ITxeFsNh^h@~YYCTkGUpv<)g@$v6~JWL&#^ zdCyKjRmLZ2%Y_UI6NxGmT9*Tb1rStGRaqw@yPR{m(i4GjuH$*iC|n9Yw+?gF6R?x1 zArMm_Pm2O~`@Zr#hR~{uzFI{Yg&Lf=R#w~Q{|0_1JDf#5>ymX77c}EmYgS33W=>+! 
zf+P5D7mh~`Fv7PRS|w&}>#5D#cMS>qyjBQ;yu3JBnU^Bq9o;b8wL#_74mK*hLq1;R zg6dnH?c`9_)ws=d1z2UEnPz4K%^|6Z4~>F2l;#(^+^lY9V@_u`auq5f?(P*7dJbd6 zYb5*RO6lJDD0&pC5qp@@9lECY#Gg7oIYO@vJE`LCTA?ROioRjbhuz->+&787H+*#2 zAV_kTeB~#c9zzgVIBECGyL+cH>B8fdRHiOkVl0;$w~5y$9yv-@M7m+Nsh2l41x(7Q zn3&zV)oLRqCb)+1_jqdGK0UDe+{xW3)sd0~Zth!6iN$C%(8A>eR_NjQg_xW-6a-ui z-4Mzkh^pPl8@8=clAJi(E-{vOI0^Sci09t!=!zPS(7_E<`Y2{mG*PZHph>yLBndZn zefH8Rk<~fos91CfLc&hfL!^*C4oj_@-i;WN$p~6<%k$J1rf--E@o~qw8r?*SmXf)_{7X#0%Oj{Pp(w zZE${Wa8oITc5usS>o{%VzOtHiEG!JKTB^9c|6Wcd$%r%8+m6r`jGxj#%k$`-a&?MB zj=y#l7n2(@r4}kMM%n>(mVGcWeph3m=>_{^;3rUCR7y~ps^rM^4LcS6*5A$zoP~h= z`SyNgTQfs6_z?%o8+?a$hH{3k-r<8|2AYvFKyMlv@BeKVj z=bNZ{?Rt2+*mm-FLm+R>zDAglL3Z*sdrpr*F)etmvDJVvWbbgt_94&KfmY0-1v+1A&_w-rAkXwK?Fe*Oo|Gy!AEI2C!wjRdI`b@ zbSefnx4*BGy@j^r6}XWRg2rOnMpx4;f%p8L%Es1v?Vl29#D!_Mmrt8`R+Uy;tC?*S zh%c-2$zk{~(oND$;n6&MW&!c)NSpcr#W_QFq8l00avsPjt{0@I^77%(?jNZxjrSmz zLDAT8u6_>Bz*V+_R+Xsm%P}pMR5Wq#@F#$wffT+B>Hw-50dpw9!qibFClhX1xU;h9 z-`#fO9@lh(WL5z~1LJRj*1jl+=j)%ZuW<6yhDGTq0NhTPUYSa_1K4aOx%}T)6Ys#~ zoRs)c%OnA-T7Vi2a80dlTcWMgjrwfuWKd8> z5n)*=i%!JkozH^nDc*TN#y+OI&xmYcvxA#HcNZHF`Ius7^ZmEuIL`TIB8u^)y%vwy zLJOB`S8kYG1=bd9i#70w4G2J_~i8cq}1N?ILq?OLCI~<2Ph*g^d~cPavl&lXbwHEC7UbQZ}A$g`RAJ?!z09P!|FwY{sF&&B9N+&4h*kMVtaL-PJ75%>C%<`2KeV0WfT-d zLf-4x!4IS11u68nPMtdj`8a+LI3&nNw|`4&mhG1=W;LD_ieh|1IyI~e%WkDZY8+vS ziIQ%7HWgdH4DUl?8_wtO<Dab>RS(Fzp1agw6V1WaWN5^GOR?dn6y{qb{7b$q6~y)^l;}D zz3&)2?aOD;Jx_NnY2%Llk3T<+8RNMJOvD~U={3D8fMt989n9Ox_72@Py)>9^g2;Uk z&zH||`zx|1uJ1|n{TO@ve*c1V8RZ4*_EA9>w%UbjJ)fR{^F9>%%)|4BU_`TGVi%dG zPG%VuRDA)JCVH6RjF`)rlZ}Zp^3*k)C)>^(wdd_kHnZ2wpsrgEdb;Xj6iHb#w1&%_tQHh!L&i0EDXM3+))596oG@>YB&AiU!#wcquDFu+v4TqBot;#PQ>iMP%m- zJ+q8CoX!WCo!WJgTjNoEb1~u4(HIE^Q8X169qVvj5{b2sW$6O8Y_g(QW3?86m2Exg zMRqrO%EI@b*4V+~M&iO`)u*(XnC2tLV3~&wHc<-fvmR$?Oe8d?6sf%KZv+Y+*>avh zhsJ;Bs4TynX!YPR?YTQ4EK_fjl+g8|_fR z_3x>$dFCbvHfSS2p3;o`UC!jJG2`Q?ERh((A*g=&NQtfKGD}TmE?v_*-7!oVIv{fJ zgmp||DvVRr(+wxCvc2|zy!Y8Kcn`AcN)YL2Sklj$3{Jxv^1&qylK=w?7H0=BO*L|Y 
z1mdI!1f1*C(ep8dS|Z1(AD3QPK8r^jK1~>Xny^GL)AXF-?BAZXH4~3>cfu6fgi>Rb z-m=g-4xhR6QTI`Pv8oP3l=u2~=>i~LS_8a&81*wX@+IkojkY%axqo84?wd0upKHqF zuSCUEcM7sQGKJ{DO~!qZ?+;XV)?8}x6Hx3I`Hq}E?%Lx(!gjZwAxYuU+-Jva)4Cyf zQaaflfkU>J2ytVYYq1xH=@S%H=n!p50O1c_ryfxD?ntG4)#fE5y$R|B$(<%F@x=o^ z4Y(`DdW?!9)(1wpvAZ3MAn^deJ5)$O&i^Hw7zkO=Fw+}3RxW`dS*d#%ZKoYwXSksX zjE-w@F<|lr)0YXEh_yvHUeMJF2E?o|-B43T4hVY+s?AjW!$+wHHyTdm-AG6MnU30^2Z7~@=SoT$( zy58=gCSTN2sF`;@TX*P~2;7|=J#iXzT1X^?7_0|Kc}%i89QkcuYVAk;A++Zj`A z^LKb4XlF^ZI4jA-9`+Yw*@6Gkkzr8&9kGgJIs}OCK6pJ9RMFd4yh^FaqsYTNQXWE}^1RGj> zH#u=9H>8*RM!?s%Ui{~L;z=nPtPP z;j5D8Bf+tIak}-^^Eir~Vc*@@>x-*>&vo)+Y)+r1b#ZO1Nu7Q#U+}VO@kZ+5Y8UIU~&G4N{L971%~~ zO#3ODwZu!hTdrXn9iXBUkeENbZFsUY4LYZIED zld?kR{)|c_IzTJBczyyq8aru#l@hkKq$QITdn|zsbb}g=`b!oTbWk*N>uyMbs_7tl z(?FXlp&5yg&J#>CTe%#(m+DDllBv{Ti{ZNex)F?Ts{%ClPA)N&I<{^Cd+qM0z!%$|zVVm6)JscCk^_xFM%Z z(W30%7h@4RW;x#|Vm(5ux>Sbu^vymmpR0sZ>rQF4D<#H`O73JytkDllnCh_9r54!d z0-Wx0>V>nHWB2!T;&<1(VB((M4)oaac;-9&R1p6EFOQhcVu*qq*@Le99!?-j!5=4n zZti?b3&XtM$}!n#%P@aM+k6;@8_NZb@RjOI{qg+Fz8i0CgN-E%WKJ_N2$FU#(|=K! zo84PJYZ7e$cxlnQFcb{x?Q_E9!Oj*uRZ9MrcB)wU)VF6*3VHxRp`6M)A*3R~m<_D; zDO zpPN^g_i@PaA+ERTYm?^OdD^cuDaH;w3-d+b!>vsmp1j|f-fM_jrq)*=kqoQaC4mIG zjQut&*oaicme@pnS|dpuCKSq4NNnx6ce#+-SIbV?rxmte*SqGF9yC-)O#6ZBmzL;@ zGupEWAd+)Eup!YN7CAoB3`Wly$?PcmeeI^tVk5*5;2={>_ZA+Ru{MK8-1#f+hDfs! 
zY6J@rt$UAJ%|#2t?O5+Vi=6LetPI*A(9#Ny78QXotm|DnBoez zU3IC`efTXVUP}i4llKq-sx{H$2lsoi;|kAK=C88JCJol$QIBbfHW=$oqCB|S4Ymnm z1#QO&6ezuvwW1BB9$Oq>vp+NQ#nbh@z8lhwwvd7~ufYrnYOO`&n<6qr7fD$l&xqTW zOlm%TxP?s#4I$IVJLV6qr<RU#p!U4*=mlCzIOaLp4p5HPP0tjuLrF6pxNny-`+8p=JwtA zxa$68r;FZcnPKxfCb39Yb?ed?9pBdPkkbZ1z(e)%|E zUb%6XR@1|#ocw+ecW)3GG6vsD*cfAK9E(>ID;4PyStHE`O>LNkIZ$>y4_;w z_0+esGq7|^&ELHA+{Rd!zfJI~5Qy2igKOP3wq?<-hn^=Uj_!q%CF^bZ-_K4o^KtGp zMvc5`9vsg?g_oYwEALaCWYyu8kXVPBd+uR3o5iGxokYGgsk2ucF|tcH(A&3NF!zM> ze(aEbZyeN0RD!wNsFi6U$nDv=NPcp<+TpB$OIpLy+PsocjynN@HQFt`$5Kt% zT{Cko+rAdw+RBcbT*jcUE;Zgb`7G0jyhy>16Ebtu$4=SUo!n{m<9f~zux-9%DB9VV zcU#f>+J7R5zwtaGy^3gMh10 z8ua>sb-916;(bew-#O^qMr{X~Ts)~^jUweSZ)kIG`15VK&F1P(h-<4i&g5QMj6anX z;|li&zPUYOx_69JO!{i;Q#hqpq{GqH6FB2&?e-KuV6pB_+jHAC(QD7p>U;h^B{YQL z17uk)K;YwP1vrL=kP1>Fl_)4cpymT1DN=>15|mn@>&b`)l6mDQ3W$iAVPQQ$*ahA| z{+Sn28mL7eq3hBYuX$i<6%>*(v;j&5Ax$1oxdkHjIqG$1ci%;r5tGNLN7+R-9_SQ1 z$Y6x>OUyJxL>ZYy3`0c}B7s<{wVeRoNnt1+MDXXC4kjA1SZD@v3S2z%d_P&PhOjn7 znbQrEiz6*6l&u)8d}zs)*WZ% z^PX_oj6FIfOXkDAuP|HDaSd9u6bD!lz>ql6q6lj7R-8E&m79oOu{C(sa~!Nl4P0kR z@m7d&pvtwUz2MrSviG3q%4cS-)F|0Zt2Wdfxt_eU$HeMpeH?!XDdH>zB&h*NumuwX z>k63rJ3Q@Ai*5RTVYtxyREWYX>7h`9YG$Lmyh0Xb#tUwfE@jJ=b#}pesn@SiUVj^O zrr~WLG2F_wl?;1xH8H5O%+Bzu;VZnA#q-FZ3RCZ4x$EA57l4R|7CB2X<* zrliPU89iYWQc?v{PzFho1=_@Wr-g>8(CM@CJ`!-BDipTdrXu1M0Srx~ImM1#DyHU{Kq3O&7iOU(3W=g9BR1Ki{zaor_r zJ~g$>U*_hn_*rt53OXkCO)^LsA>+td=}(euVowz&8lJrIh5dVh%B5Q+@`io+-+kT# zbW!v}dTkaC8Ak({?Y+C2$1X@G9Z%T`C&Lct7~>AvFFO>|2q<=&Qgb9YL!@E!K=Iz+ z7ZL;tjlZk*k9?`V7Ew;?9ML+a8O1OoroLU<(7cP)B0MYHT2tBd!$g5ojYy*WK>V;- zCLM7#4yLGAAsP^HmVg49qa}Z&qp4J)l=O_2f|~r6I{o?W8< z2v}@_`?7g<6i6Pfe?rg}6Mt_<&1&-QoWkud7mEz0vg{kMRjS4lAq3}_N<$QY&^>Gb z#syswhc%QTc!D)s_>mPN?W$UAK%6sKVNj^Kt6_{?S(;}uPN~<#4ZSe0Qqg^#%spVm zV&shYI8z_2aH^t7uZ=T<3Y@31l=tg3LHUH5sF@xEGed+ zREO(|Dv+c)vxa1da;Ugoa+(CHp_y?YF5S+f&q?pbQ%8iRLrpA2BZ0&oGKiHbqLq~- zQB?56WHwGC4J`m;894>-TaHVo`P_^NTnxeM}%C=y-v|km4RPu|O6n`z1vs&R!_$@ zV0;?A;=sh&F8aI#JVx=g#furRlK-t%4)nc1e&h0h$Ku1XG0$oD&^Den-wiV-M8 
z(h@|IP$!IB##0r}LM?O;cR-V=g+v@UE84hFeBjA|p#%(|U{++qKV4wsYF`e$Hml22 z-{kbw<}seUtKcTks+P%@GSfMj#_^oWD-iA88k`nXg3UmaTz2h5NDG3RkAvJW_3rh~ zqe@a~!kk2*83-$Zmk6UoA#FtzOk#$aL|ZDz5JOa@z@jL|q=4Ugk9o}FnP`opa4ey; zHC(qaXg5^RQPK>?LR7G+RD!8i78F;hm~#r=T~!%XEUn|Uk36UG=QFQvxXKA5%V5B$ z!?g>5iY$PLxCFUa(1{&!wu@>zdez@Eyq(H3n!_<=zEcgQIH}}qOzp1dl@;Sn-7_gw zFxpa3GYSk6L}X$Sl%hDC!pLz!?_=L3MX3kkT`$9+o3EYRS)V;O$RTS=aM!zCA5i(@ z+>4!F^V5xHK8Sm>5fpJcht}dv^3*ny=z`7=sL7pEgbFc6wd3{0ogF$a#`eAm+G>cY zd-uUQa8N`CG7dzF3LG^L&=9C8-G-1cgcrm(ps0~l$OG7)G#8OG5KO9#RB9v)Y$7{w z&2Znkb{qMxOc@nb7>tp6F|_%U!bh!3-!pd|8cFoAf=G_6tWoc3Eblfh3U>ow;OoY_ zyl_q{iVV!DY`o5SL3^hg*llC0GnHhUBt#Si1qgo3DoTMpcTPEtVZ%Eeh%MFSWDr~=9}5aMUD zjCj@5y_-w~fmJ8hXksZR#GWVgdX%(ZcUsr`=mP}&Tlxd z*ZGf_8z6d)+LCOlYy0horB zA_g)b<(=j?kW|agr#Xvxt9x3}Iibb|1a;?X%PK;>^NmYwe>VSSov9Omcuz{v$6SUG z?x+$K`eSym;dh;`j<%ikt<}s>O0jjQ1XYgAWM4W`-RR(d=$RHf)PslH;O1{OF9x^Y zQD3>Op~TSS=oTYxi7sId@YF+_v93HDj`PAI2&y5>al0YTvtXs#@XTH)>ae2%XI8O{ zd&{ZUFHP0mYj}E36Zpe6uZZkj`F7Mi92Rx59j{eD0Xv+;M$NSnB$7oE3@W%o+Keiz zj!I^k7DS*Jx^JwwHGyCz)MBBt2@gztA6{5^h(TgIRROU;QBcMo$Qc+_b>j?ab{^PT zAU29Lqox#*2Sg)vS5}ciBnpuwssqhM-3kjYLvb^<8IfbxQRe|CQLd@U{bSACW6w4y z9ycW36HhT2PAJ1&%|O|UyChx+HxA}Eh+z;lcR3|(AWBBIOHRRjdBSVvmPP&pFNq9;2-F?=<~=GS61lwp6X9R(8X_Ij+|2jiuG@!j{Ngq>`Yl zatk^-^oWCe)odv70v|?;*pO(G8pH$5BHY`)GR~SyWeLpG-fkxzvK}ilSzzeNrlprI zB%sRIlPwRLjaAn*W_@n(ttgjl&aXv=L}8)Q>dL}2kwBX9 z7DXKt98HA4PB8PjZz~E%E$A5c01l{TVdX?Ju&;+w1=*3%v}2Uzb#chL*oLI5JU}) zQ!;eOD87=L%oay^vnF@yb#;jW*fv~yu91^LDi}2XC40N_l8w@B0ks->B5z&>XH5uBB z)p8KPS|Xi2N40Mkuz{j*Xm8qr`n;A|vRc;iJGup`vJfC7K73=(E$q8o|U_eVihH06KxvTU#u|< zK}H85{YG zVMCxx1y{;U^8`a?5t1B%)~X1O@M^%i8@vug8_~xucRJMJ*Q0Nzhh%W`ICG-suCk?v zkUbdaBYi3t2V^`65=k|?5(Id!RfbM_r2QDRz=jRw-VZcTB??J6WQyVIpc#1+frde< zG+9BUF)PlCH-{h%!Va#d5T0q~A=7jU-l^_T(Y#RWyk3mf2-cfB9KdNxio)d7;KW_= zPP57bO!%0=ra`+9<@OmGF0jU(KnfGXlR084qL@h|#99$3s5z7D%}=Tola|AbhfkS( zyg;GFK2G1JbgnEQhzjCHxex_5jUY!d^l9DSG@gxEg5hYc_U2}8$|{y2m4eWQxf+B> zCkY25m{`UdwzC#$Fd!7TgQ9BMe?14}&tQt(h(;kW(FB?!!$=rYan?QT>(7;ltQ1`` 
z#v;JN98qH2i@(8ILL2>`2!K$X})t4P5#P9f@DnXtLk&6Cd?8n$+gb=Jcj?|bF&lOStB zCLxU#s0-n2>fZa!TBGLFTo*FXVA<`6^OnSU8n6?!!X}CW^pIP?FeO_!7<$!= z!bco)T$4y8vNZ;)MdXs6$jKP1$~r(&AP`gGgAOWV4B9Rk>=`nef!T|i9%NP3aJHt} zTZDJ=6wHnevqfInm2q0kCl+YNQ9@>p5ifg>r6JU^1xG&d1hY~)` zXF@+|_ZGaSHN_fPn#%#IB|0I4!kY9^qpZj+%hhpnbLj+6HgJ&7Q&%FqKqK!nkc%BA z`&M{ichGZc+St-luakN-wnOM@`cn1)x}HkdYl{dYL=prj(dmGGf(ZhfWue1W=QVJK zNnOm~x;`UHj2NSqn`q@NrA}_=SMBGiHg|E)L@iEpYMJeLUH8qhIPha1FcKe&F?`5!9u2P_K735Iskf%->(?PgWU`p#iKYTgzjMD<>nv|tEYJB zDN+mrMx8$$1Jj}j0Gd}S(RgMMbjw?jb1-B%EUjl}@dQZTN{sk-uT&7fOns>s87#F$ zt}dF))kZvNbt|^>-k^+9&J#^Y>9;IfBLf=0L;(-1JDU^SHJDLBBp6Va(GI2Ss4bn4 z$RvGn>bGn!XVu=Pe4Ng5X`y8jNtnLkJ0THhH=^8oGLO{)v@Y%{rrcbcbC~$GJ6TU# zPHhURB#`=H>(j*vexrRQSh_EMl??&}CW#~vPY_-dK_c7dOV$eK6ax_e{g*sq$oo^1 zW}(nswpnfMu_~;Ithjw4vmhhE#N;!u7$F-R{e`v60(c2DS+2TG{Tv zy3#@wL>eTU$o5I!N4tF*3NO!@yHsS~%`$$BI+!Tdg6=%IqM(V1So^1~IB^w4T4ikq zD(#PuTLCmJp~Is?UHh(PwtC#)-->#^`f><)xxy`jO9nN#Vc~>&>}h4*?oSQZ*T--z zdQF-KNN(+-s3s(Wc*7uB3C%BTL1Yj?0HPUW7Iw$IEs>nO%NTTPE$=VU7{?14uDxmN z2Uv4I#_gX8}|aTBgw}FtJ4Q#W*NUvt&5?=fSs~7pOVHq7-ikj z8}C=?=w|lk6`dC5W(MybUD>s}?#8cY>RU!**D>Nexp!rtkVn(Dts&AfKQi55TXlqx z$iamL#JF5Fp@ziYP6*BVV1oppA}}^eurcU{&NzFq-ridF>#e7IW`yl&iE4=>q!{eR zowy=MfQyZzjD0;})1hfrA*LC{Z8>rg%%nu54G7sNfVr!bdTr`XuUb#y-FLKG(>7nOn3uc+BCu5X}*GYob241?3A3=)*ZX{W);;rzl zM^Yj{*$cCcY~!wJ#<(&_d2x2PLop21+UM>Bnh?!xCLJb7$s(f)*c>!^q!*WC<2;{b2$}T|>oH!OrNQ=J4{dW<{U^-7CH;Sy0 zJJb#oisW=Q)zLK&<++3C>p@pY4@(#t7^rd7bU+|;9?2T8heYCHyWXHQiB-;8kBrP~ z6Fv}1HF)0BZLT1+F|thJ!6^|8Ikl;H9C~a)5gapy>c^s0QX;wg{nz*%(tWnx-8J8Jt$16oY1VikT2#Rk$diI@(h7nIC z#YH5S^65S|s77P!?dIOxG-X^=b9J!iVu*+=l&a=APTB&Nt(wkfJD+Jxddpt@Vz5pk z#u{8EO@EWPv)7k*-jqG9qiO{9k|7~=VeFKlcW%vu?pD&8M_Hsik3>WpCF zVA6L@Lp#ignJ^So%W6Q2D9E;Ij5nF3m8;IQ)5*5sG~Hsv%Phkg84etxJMex zn#-~}a>08ZlSNGf6%n_^WnZLmduFa>Jtt)xK~rD6THl3^a=ehe5o=vMutlI48S?KV z7BK4Cksye&xWm@3N&2;6Zrh`W(rrXiKJ`)!X4sIi(O%uitFp~PD_Df;GG7*s@SG`* zK4QH)UFE$Ghk9Jq^Ts)12?8L(oL{8E`o3u57L$%FVq&5TT=vcl6!6-lHbIWs+aZmX 
z7F`oV{gAG@fUmM_8g`Zt;V$IipIUZrYk6;5TrzGrerGf$%FU4^-jwKGTveu*X_36W z@0dO;7Y44*mF;B1+X@LpoTPUML{k%O>0}!^W)fJ%D8)|Uj+5o9_!X%Q=n|lUPT7q{L1u{a2c^n%|Z7Q{85p7b5r1u#k zhR*r3;S!o1Vw6cN_u7X6Tvlj4m2H%UjA{a?5wZokjRKN&-*fQ>6W=TbNA|UwTQu#B zb^WXO;QmbxNOy(gv50SGP8CjzuA}q$X9+G8*@ph|=ekumHAwL}?@ffHSv@5BU@vy0 z7Gny@iJMsk2sy{hlJ|RoSIEl7T`=Ir6@`y5m7>+vrI7oA|>70gW&E@ zN%=ygC-Fs65ckf4yI`feuT)ok?f)esywNn>93VcHxUGIjd%}vNUnI-5D)U{K0 z6k9jEJrTVvwv%=qKi;~{hq!Q^_OxK}cB2nrc5Z0qsq1E8JUl`-au`Cw7mEvWYX?Ly zd z^mmh#IMqW#2pZ;^+C3`J(Y?WztHy?~u(Y-rDDxQHr+wZ#!Vo!$F6o3AVWCRTP<4ph z3^h|;5Lt+M##Q2_+*6IDCY@dPdb|S@Lwb#`W8tmeFna@BplY-{>QKQSXlWkM1H&06 zZ^9GSdOvAcj82C@Po)?weZM^6ouQ&(x3s)g!8hTgUYcyQLKYJdIZPAG@_?voA@K3- zg}*msdLSZI+quA^o$Vh=A9OpGPs=qHFTZEZrQUO5yqDkwEE(q<0bt!p;j$*)(D%28 zL@}Z)NTM0gE`tT455_lQ?q}o|06||+LF|ZO64V8?nPq6?vlBk{?li7N8Lj@yxeve1 z9**m~`|8H=jbf`L(Kc-_YFAD$47y>Ff}ZUSV8-7t;%I_uq*HI7Z&-QLo__{njMGbx zuJ}V`1XLF!iF|h`w(0*)`xxzYeO z$F0|QoscKh`Yw$O95evd)eVtI!x}eite<@JFYNpjJ=O*qC8tN;}=? z0hhP1;$u-z5CRdiR9I?2^QDuj$S+@6VVUL-^zo38Q79v1F;)#wp#i556yEa{&)%-L zyu8O#cc03`j+6?+=*}D%yxSpzBW;WYAzG42h(P*gFh^Ywa*dC5 z>g>yU9FJBS#Mqddg*1w}q9vg(92be%N|h9$uHw~L^b^`jN+L>r_+d&=IRoEVC=`Iw zY8-$_f`AG_nouZc3K~!+clr7Dmpp$bBZ8l=cqB}zKk1tlh=LnTVW8c@j^0Hi1aia?kF|Q zb;c1spsGVAlJ%wIxNItq@Z5{49o>G7LcISnFwUG zxiJVBqRH?^(ulqcl405fLMSj$6s0tk5ES@c03OnX zC}0NQp{h{kXdIS-r3wOoqNu7WgrJCMT1c8CiYzpN&&%g~-<|_+LvkFTYHeQ}Q#=cC z8pi0yiTM4xg=lPk3BnoW%%&}D<<4e=<@XIlgx2iaw!BcQ3rhKFOk%*QZwQqAxB*CO z0<-M#+7q8&9gODJX3;2*lrS8(yfwk0cBR;~z8(O2oPHm+7hp}EA?wayqOfE4vZ|;1 zrKgilLCS)N^h435GKz>JTMT4G4cHm>&UVjYojh>>^guh!DwGe_YbUfOPFd@Qr(3~t z7XpBY1T&WH^M@hkbCnTPV5pYa%jkh|f$6;yug4qe;BKDZ#)%zYue;j=E7IQKEe6Z# z^JfPH4ZYgn8mD;%&W}!YjJQBVf+m$FgFj?JyLHe!;awV5*+dl8M}@?SSY9)Mo|jr8 zcLFeXPIs4fXvGbu6lgZDJkiLxxVbFpSh5Y5V}C;?x(M;3RL^Y5CT}@U9xl!zq(Mpf zc!npoxXSVIhuZEK&LP*FqP;?jJ8gvRQ{MK{fM`P6aYri6TL=?-EsQ!S7;G_YHinB1 zT!AP_!kMHB$Y%_>-dAX@rWh|-iop|J^3OR{>UwZhPFjjpMGZ-r>n>XL^U=7dxt=+1 zoHx#7Ju{A{jUJ_IH5*Sh=hm4zn;sQHsSin6iM?S|^E?--JyRY?y-duq(3zVD>~;>w 
zJ^g$1@i*v+&_Q0h9?YF*bRM)*b9h_g^y(?;Vum{2;G11f8NRHpK;lk&v!Y6()7LR5 zL!=h*H45Ybk~E~i>T0s(DYWIZc$LW+slVPit3@3>dDwE<&^cLro*=G^R*VO;8l36j2d!f+LksNr}X$ zPR(;Tz;bPrFe$Q{ObR4{NEr%br4>L@l({7Um_TGCMo1Ydlm#FK2rj_80VXGGC{EgN zRAN*V)h@uGpeQJ-lote>outB>2RmSpCj{d{X$>k+22>(Yj!Q$5rb-zClmlil2x=Wj znv#JDmB?~RX(^@40t#4$nW+jg!ZM4eP8CHArpu8ws6|v_2Af%uL<>bLK~&N=DM_eIg*_~VxW=+U?)l+O1QW+NJWJ~%W@JR1k*QBO11d^GL_@+^VkbpG z88l~yv(9(=SJh7;>y{)`_u#dqU_IUAr8f)2jTE$v2+$O12j&&Ubx?zTCcDDkAl)Y1oA4?TM8gMfw^d6A`b-1CU;@5tnZF-=yy za5@_ZB&80BvDa=AhcHsGMJbIdk{lm*VUPrU7kNmedCPEM)R~l#ebDTI1hKabgPT8- zKWVH3yW9D*{&u~p{U&ffOZlt>Ui4t^K}%pmRo-D?5KJZ7E;XQO9W(}XlvP~cb9_1a zXIABDcG2vk!_zM>PV=JRfZ9eQO8J3I^JAPw(FltmLK9jw&4etH8XC_Gg<8Zgk~Z5= zyJBUw&KlgpPDFz8LN&aaaBNPP8p(O3oYn)8j3>K>Ud`-5nbRB>qts_+3jkIMuGxKj zJzbdh<1q`D1ymHFVL<^~V7eJ)`o}zkJ?`A{5_5E?b<`=Y_jXRa2&$%c1A-KyPOS9P zQz^T=GnQTI+_NCe&d6SuDKRPn43OmF38M4JE;$^W`0qOQ@!X1F=L&8`)UhMDLvyG# zkn1%VI~+U!gMkaXvU|m9=oQ!rSsx(WSe}-M9f%28_Qb0??=mZp86(o`rY`TS#&D8R zCKT2<(n`!NAqr2ZhFO0Wikh0IEY{$Py(zWFHO!P$IoljBTfb}$%|NIlDinwePjr}{ z&e&=Vs@D3>f6fM-3E~zR`&CVGt1c7A&49*$a+;) zLnRF$9cu&b`JBB%A)sJ|ge0M}%(*xSvt5IH)1$zHn?y2B=5>4`X;d@`nigqr3T2A1 z+L^V;dSjl%3Gl!OP6h}A^pw=19v}an$Kf%2zhc=GMFt472IFmxHe*~iM1iKjzi;ZF z>`xw~@_TiBf8ZECAdkWQ#IC6GCE66;1g}gE@*ml@qT)mKg*1@G6cqw0J96u2oTQ?t zD?j9%BKPsCRGTE(!XBqQ|GKdAYy@fw6NeeZg%yXH{_QqNi3M6Ga*k3#8z=l^6x;^0 z6f$WY!`I>8ZG!zj8^%bOZdGW*>B{fzo!Df{FW#B_Zj*go$mJ!KjVn?0 zXuz;&U8R*~5ln$}2hbNoPXPeB6@HS;0xo@I4NlkBW8dq(=g$9^%iG8S+t$Ob-Iva( zW>M4ApKD9oH<_~hkUmqAeJ4ZbJ9?Yz-=J*rL&_fDhjs-T5T&Y6f+AF+l9G^!p=nl; zN=R7_^1YAm+429TVxojTp#XZ6rG=dU@F$GAkey9`xY4IbqHh$|COHQh9-iJv_L8V6 z0+xyaJTnG>nWCB!fIEAoQ4p7}(t5)MT9P z4PMcAhw*=(x#wr26PQDCkVPUrqbtV~dBb`;v=5(Ci_QY||8und%{%IUo`U|m{$tVk z`{m8O#h+3Kw+rA3jKS7?6{&{xADkrjF;`xX`XXXF0EbmjjlQf3+eg?YnpW@+;vL zumP{|9{)c-mKD`(gWto0HQ7NoDA0^W3qq{`5hxv*2htDVqrr$r#E*G1F?+x$DFuoj z@*ySJG;thwgvtfBGD9HQw2CRC{-GQx<--^Jmu`{ctZvNExUNGN&;@ww-F*ai_;9pU|#?a!x~ zF;QG0)5`$!ng>yk6fQ#KI)T(Fs4FF}LkUTenX!lX=$+dqcx_r!;$d0JJDP^-Vv*DxVtVoIq2*@aTYAL<3RNwT> 
z1QGSx<};KsJ2Z>6sFsByuWEiNh-q_DO=IDeQm(`N(&&MsqRm}&jFKqf1|UP zeYT3FZXVftEZqjd@xf9gLh z>Q7is?CwDRh#CUk%_sFo^6`BkbP|V18b3J$JcK(O2Ii^)w)(h(5;9{mD4C7^4iLr zYT|QB;Yz3$l@xB-`D{SZ&$RY8{o9pnP@s&dT9pfZpmgn=QZxKU9l<0NMvS>hq8OZx z5z<043x&>A9Xdv*BlS4tzJ2e@+8)iUSg4^vr%8oL#<` z7fO8U6{X$=V)L4w1G0D6rg#;YbEe}*mvXM#{LVWnpH(*@Jn+NjZoJaZT2>f`OtAgz zStA&Y2%zgIrt=IzV4t_A~I8+0lCLq~^iJl6{DWhWGe$!U9 zNEZUs21E;d@8K#j`u_3$f*<2i7MU8HO{@F6CwDZ}Oca7q%=U*VkyJ%60SHz zbx_ob6NsTo2PmSd)XlR2MK!4=uq7CLNd*Et$20QGns;F-nFM-oI>y%D_J}9Q-%i}m zU<`eri7BG0iXz{{86oz=tQ72)eE&?@Q1Coqn3!8=$YHooW&y9C&%$-ul<9%#hTEnc z1ve&+TNfYFEKZob*Bl4HP!h2Imm}^h3=fU5^L!iU-H_?z#-6PL> zaRJFmQfIMu<$Xf$_#>6l5&SMs=hpU_4ed$CP|B1IYC+q&6y8&Nba=qiZ;vpcD5xsr zR#7BAGNw+E$v*C0oFFjlDp?tNRsSPsyW4>^wGjU?c#xAD1(?K5*w?|#v+qh-cESb!X@!@4|Chi)# z8d+0~UNWP$ioQbdcC=NmFtp4JMNQ$ZBGglv87uyzDk987xH4j3mquYtF%E{FG?@aJ z)9Vh?{2|kwK;k?gJJOyYs}UC>8ukSKAH(;q0t9OiQB{#a2CRVKCquFzC@vzlCv5;H zCCNJ>CeZ9jp~5+Fgg}!n(uM_g5e9**i%JUf2M%4%gOU^V9PpwN60oc`%rN3qV-ay{ za{^P(x@SFwL>Dg3cgI{E+sC6L*0j4p23q|1Y~%%WaXSOB_tOPff~-WYB}4UHsi7p~ zUBiXXF_i`85t}LamkD@otcv59l~9HGN?+Gpc?B|=ag@$PNjmVyk?WN)wl+erMk9XksONPTnShR6hLT%!%*h2WOAYk?93`u?50&j zMG$fe2_`J4iV((-vISbmy9*&At^r3d%aG*NMH0|eMM*<4yu`p#pY))N5%Mb%Fa`7a6K-2{oPD#>;Q3hJ|z~vMSWIxF(9!S_W z9WrQ=idKKm{OBp+bcU6eTpFmP17;LQ(}2AwUHv&{YCZB`S7mnW};^ z%4Ko`m_s3=C86|#Ck9lgqB2w}h#HVg(@!V?@D1DtB>hU=JF6a$|>bP zj##sw_A?X9RaGsPcgyY3709{YS{&A$kEtA4NfwonM=1JHVL>NO5FD z4G&3NkfEe86!Hf+6vBXZayTR>Oqit1!tzgeGeVo1Mw(hq#2X`3B~fDd=>t%OC{mFP z9$xgrss4Ag3V$jgXzgxYSe;Q4~m}BS_Ie(@Ka^G?b+v6+}Zal%|h4K%r<# zDw-CADkiOl(jqy8q@WE+kTg)G2vT#Hcp3A(`OCRS#tOouqb12`T#|`|=27Fe6dXfK zRJ71Er6njX9;Db)3T4N~s|O(UDhKYFFOOVA?*`#0D4xL0kN>WhJv>_8o zPe~kjQD(wn(@aE);Tpzl!*1k|~nW8ElC{)T>mYt4$RZL}Wo{ZV7aM z!@6O*@g#@l{&Qs7;+QD3MO(Ud7if7Ut7 zfubxyB+DCPCosC0!-K1Y&A%gNT+j9)c8$2lj}DTuPOur@U1SefWIvNr-KsFe5onIf zFdS8GqoxrG+90Je=^~h9i7s?QN)uRg{_d#onfvp%9yNFVeadW*;R_s1IE?8?jJ>u& zkA}xCdzZzx!ok0hf$6q%xHf87@t{4w97SknN7L}xUE%?_-(yJ%R!|RnvT||PVZx)P 
zyP)Rg+AU+8sRV*ZE}X3-PK*O{_We?ND55lRwZWRVePT!t*H3Mn9c@|2*6~|ajQ2O| zZmSPgfQbdwt?yF3dAE9{wC=QE07Utw7x4)>IKviv=#S^MYjyGDSv>hW#$c!?aRKAQgRXAY~bz-ax zi?W=kb4})+jq4cea9xxX4cm_XM=TP}&?cy#jgWQejS&bu^J9SfH^CnK_Q1)s+QS?> zSJwhLM`Lm{L>(I<3Ib_{CIsPG9IGTk5na*_sr0ikl#resCT&!R{KybCf&2L5d9rmIom`W z0axj+u!|8E(&Rag)?K&suGfSx_J-WI2_>Ll(g7fdKl%VI19Z(><#Z@U~1EP7?8Pm$PH$%JT263d31ig=Md-VvSKPi4#vTZ z5RHl;=WT4l#qUreuBS;;@#4OZKLe&7)D{MazuU9J1%m0c$l2Fs5m-^DmT@ml+f>qM za16jcSlY;%OYnj6{y4ZOrrV-JUF$e|HwAVRdXy3801n93Xf{@znh)LsCTB{u~o3Z!e1I-5)%0UZV`y%bAG)*uiU@nb@G_aMbva}g!V=-n-c|3ig zh*1R^ewb;^r;R0RlQMz0`UDodt1H8fENWd4;0g$-kh zhXyVyF{bJ#daO|z(hoB)1OnG|gEcf-DWFWpJJ#tjJKNsd)@v*|#qAAY-J_zqqf89< znK3$$nD~S}vT>TjGp|#gu6=2r8H;St??09=gZSywKK-`R;^FqxC82F}ki$n__3-n| zHL`+ecI;oYof;dM#Rfo}0|<AWRv@SvyB`cZh6tJ+QPU443z8JWH$c1iK6M1wq9v1esMJZKhBwOfG0<Z`s5cMk=e7Ogk#6bk(${qI)x-&e1vfpg_!Nd;;Kk-q~t*s2)3jt8H;io>1HUX z>0YdfWZW>b;e@gN4f8_8YixVKyCTE*udmK^y`tWqvdHhyc-xqX8etmFu1|{nYTy`w z-^dxbsB~cf-Y;8jU(+|HIUMBJ<_8k8JL9(!qo5pZh~Srjbkb;VnVV_G1~9Xu`WQ^ z^WyY#C&L}C{)~SqZtf5DE#)uqwa3FQC454UmhaEcVsuSvmd?N7hHZB0TIa^BFn*V- z>JCAW{G|LB1oixfe!2sF{<`)+`VT3~fPwX&`NQ^)%0S6dfC?6xHx5Xr;DrrB8eK>K4K&b&08o^aG$Tr6u0T))84(yjG-Rq|IV+HiN))wC zAxTY80Ygn>*?O9qH2a{y{uTGVWK38N+pPfGck#P zlA0-qrkV(f8j_@jsG4FRpol192(uv^3W%zcAg;L;t$G-X+alocD31Tbz}|2&`Y`q!N?-rt?ntK!5)y{B0AL6zRlEQI diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/a01e13c3e401957031defb62b05434c65b01d5c4-10 b/vendor/github.com/pierrec/lz4/fuzz/corpus/a01e13c3e401957031defb62b05434c65b01d5c4-10 deleted file mode 100644 index 4582464344334bc231319855bd8997bee60de8f0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31 hcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_p^d2niHr0054{2rU2r diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/a059044bdb0402471dbe9aaaa555a063a6bc1e6a-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/a059044bdb0402471dbe9aaaa555a063a6bc1e6a-16 deleted file mode 100644 index 
a0735ef2b06b039a9cca862a54d1d7e5dc1c17dc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38 tcmZQk@|DO-Y&o%a|NB@5Mg}GZh6V`+UIqq+n*aYB7#Jk-m>3jTB>>t!3Dy7r diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/a06b1a08fcda463f1d51c485b0e7271ff9048b41-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/a06b1a08fcda463f1d51c485b0e7271ff9048b41-16 deleted file mode 100644 index f400b22c08ff86d2adbf6b7a632fc9d48fef8b9e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 pcmZQk@|Ey)U|?|4^5tPrXB1#yU&Y}g{f3}oe&NT8txSsjLeEKr3#NF7+_ W|NjOC2BR7<_5VNZOa|H_kp}=%cs2_F diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/a2e5916be780e35e9ecb7c42be52dd5e134f3363-25 b/vendor/github.com/pierrec/lz4/fuzz/corpus/a2e5916be780e35e9ecb7c42be52dd5e134f3363-25 deleted file mode 100644 index 67cfb1080b7f919b7688a12c3330016473cff695..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKUFpMm1pS|9`N_Oh8={VDqcNCVpF diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/a33252a74974fc86df30c311d501a1f363d350cd-12 b/vendor/github.com/pierrec/lz4/fuzz/corpus/a33252a74974fc86df30c311d501a1f363d350cd-12 deleted file mode 100644 index c2a9be06f713b696a360271716baa610f261cbe1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33 kcmZQk@|B28Y&o&_G6MqxBM>t%uz-am&I@V<-1L?O0EgZP8vpY%CHbGaY%CHbGaY%CHbGaRsSU*d diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/a6dbaac639f3b82609ec27c80fbd003684c28867-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/a6dbaac639f3b82609ec27c80fbd003684c28867-21 deleted file mode 100644 index 9f39c8e439181b414a21d5c19396a4389a0ae18c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 61 fcmZQk@|Ey#P+GZn|N8=^=qn6Bz(SPJ6`(!t%uz-am&I@XR#Q=^92VMXG diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/aa04575587509ffc65a6b0224d24ad1125cb0f63-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/aa04575587509ffc65a6b0224d24ad1125cb0f63-26 deleted file mode 100644 index d95281c298e1efe1b30345159672a9aaf747c23e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 91 hcmZQk@|Ey#P+GZn|N8=^=qn6Bz(To@uY^ygHvlo19OwW5 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/aa290b4dcc8198945311c8149fc1252f14555e70-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/aa290b4dcc8198945311c8149fc1252f14555e70-15 deleted file mode 100644 index 3e939f9109f31dbd3354fa01b64c0e7dcadbfd78..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 63 mcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lW~Fqwx$Ld20pplkqr-w7fB diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/aabb8fa4913c79f0a42494ad2215a32927adbd45-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/aabb8fa4913c79f0a42494ad2215a32927adbd45-16 deleted file mode 100644 index 3812c581c117632f16ca452fba409c9e6ff61ace..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 63 mcmZQk@|B28Y&o&_G6MqxBM>t%uz-am&I@Y6#UMghQ~?0AB@D#? 
diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ac7077c5220abe6cd481318c42dfe6cb2cb2c666-10 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ac7077c5220abe6cd481318c42dfe6cb2cb2c666-10 deleted file mode 100644 index 592c47a4f1cffdc5f70790a525d2714f6f0d5732..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40 ScmZS4@|9o!0v0?pNDcr&6a#Yr diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/acbef0322169a93c7421902883cc8057675c953b-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/acbef0322169a93c7421902883cc8057675c953b-26 deleted file mode 100644 index 48bcaa723f0179276cf77cccee6203f2daf32be9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKbq?7-WG0N+4md4A=yt8Zh<$KiFg@pe_lp`88k@z~-C64LXeE0))vR OCxJ}1$6;~<$ejQw;5A18 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/aec95871bc7d87cae16c36a0d30955b43076aec5-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/aec95871bc7d87cae16c36a0d30955b43076aec5-17 deleted file mode 100644 index 2bbc1c02b4b64222258094cf6e370f9204b98a0a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 pcmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}g{f3}oeY$}lu^j8qGbJf6@R%)r3F!otA8&=A0oVJ=?eYN0Rz0FSQ-2LJ#7 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b27fb21ecbe6e77c91341738621ad7092c29bca5-17 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b27fb21ecbe6e77c91341738621ad7092c29bca5-17 deleted file mode 100644 index 0292f9be34d257c386d44010d47446736e5bfc6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 89 zcmZQk@|Ey)U|?|4^5tPrXLMy?Xkf@MOD!@~F*GnTGBz+WF*7o?G&M6dF)%SQHZwMS bzke?fnVDG_SQ=Ou89-GSBB=n$F)#oChJO^s diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b38ce47b707326024fb24860c4365d58ab9f3528-29 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b38ce47b707326024fb24860c4365d58ab9f3528-29 deleted file mode 100644 index 
8374ff944848c3abe63811bf477552434247ec0a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 166 fcmZQk@|Ey#P+GZn|N8=^=qn6Bz%r|iQ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b3eaea244bd47b64c8de3d81c7b5e94e421d7f32-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b3eaea244bd47b64c8de3d81c7b5e94e421d7f32-5 deleted file mode 100755 index 6c89843bc4d24eb4ffc350485113a38fbf0cc30b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9 OcmZ?L@|9o!0w(|lm;n?3 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b3fd355dc090a732d5cf3b25151f165ea901a682-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b3fd355dc090a732d5cf3b25151f165ea901a682-24 deleted file mode 100644 index 6274d9303318c5aa7b9c4afee278a9c36cd113b1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%G}e~rw|O-)VBP0b8V&5R6;EG*ye-^*uc YY-*^bYiVF^X=rX>WMuAOYG7^w0G+QE{Qv*} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b58429fd1107617191026029cf327b2ebed963bb-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b58429fd1107617191026029cf327b2ebed963bb-18 deleted file mode 100644 index c261703926fe528fde51e0c9cbbb6b64aabdcf22..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6 NcmZQk@|DPA0009Z0Qvv` diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b58846d79a8dc960a718ef88dd3a06ad49b1fe72-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b58846d79a8dc960a718ef88dd3a06ad49b1fe72-16 deleted file mode 100644 index a1bb7d40072fd5f4b3c8491543f5d4343361dc5d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 qcmZQk@|Ey)U|?|4^5tPrXB1#yU&Y}g{f%)r3Nz`(@7m;wM?CY$}kr%aB21Eb= diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b71a5a7c576e5cc5ba23845d352b2af16737c03c-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b71a5a7c576e5cc5ba23845d352b2af16737c03c-7 deleted file mode 100644 index 
1e596c355ff642c3ef131ef634f44e5566a7e519..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17 TcmZS4@|9o!0u~?*VkrOs4(I`_ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b7815c3b5649d9a367ba99e7e09cf1f251ab6f83-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b7815c3b5649d9a367ba99e7e09cf1f251ab6f83-18 deleted file mode 100644 index 4cf6940c45f769d717e6cfb2e06ccaf0a6598b2b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 dcmZQk@|Ey#P+GZn|N8=^=qn6Bz=A_a0suJ;4FCWD diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b7a5b15c9e2d4d659d421de8e3b463200f71f1ec-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b7a5b15c9e2d4d659d421de8e3b463200f71f1ec-23 deleted file mode 100644 index a47008c94c650f98bd7325a6d47adbf2cc51b807..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 143 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=IQ`0U~`C82|tP diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b83b3d04ada1403578065d7f10aa7441830dea3c-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b83b3d04ada1403578065d7f10aa7441830dea3c-11 deleted file mode 100755 index 1288ace233d98353d9622a06742503716db48aa1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20 YcmZQk@|Ey)U|?|4^5tPrXJh~Y02}851^@s6 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b92c70d3f12e67c69ba5db9ad491b7a4e075ece8-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b92c70d3f12e67c69ba5db9ad491b7a4e075ece8-7 deleted file mode 100644 index b113b1c5fc0c753d313161951e1f512368fd70e5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23 ccmZQk@|DO;Y&o%azuNm)21W)D$XMC{09hgj9smFU diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/b94b7ebc6d153e0c99a97864f58b26f7192f66a5-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/b94b7ebc6d153e0c99a97864f58b26f7192f66a5-20 deleted file mode 100644 index 
11053a55e2cf79c70420e5fe4b6ba643becd750b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 mcmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%Y%CHbGaB=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKX9_c0tJ*n!XTNN|L^zj1tLbEB8LC}!6q{SbxDBDuK^hVG2axT9L)s{ VHOK-WlkE{EgWP6R19l?RodAn$HT(bo diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/bdc123d9da19a7ae0ff87ca0741002fbd8bb2cca-34 b/vendor/github.com/pierrec/lz4/fuzz/corpus/bdc123d9da19a7ae0ff87ca0741002fbd8bb2cca-34 deleted file mode 100644 index b6d6b05ac192e13916dee57d12dc09e639441daf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 41 ocmZQk@|CDdY&o%a|NB@5#_hZe3=K6Bd2j&+`)!$b8GwQe07n82_W%F@ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/be06bb3c3b604660fd36b2af8860d35e31c8bbf3-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/be06bb3c3b604660fd36b2af8860d35e31c8bbf3-8 deleted file mode 100755 index ffe89ef6a12a8b19bf48bcb43af961fcdf876f42..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 66 WcmZ>Y%CHbGa%=qn5i3=QOB0BsHpnE(I) diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c07f4e4cb1d0a34dc6899097fd27ee9f1744cb70-12 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c07f4e4cb1d0a34dc6899097fd27ee9f1744cb70-12 deleted file mode 100644 index 9551b7b887fc38e9eef806a4894adbc7e4ee5d44..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19 acmZQk@|Ey#P+GZn|N8=^=qn5i3=IH1?gonh diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c1972d0c898848e6188b69bcdbb7d14fcc780ee5-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c1972d0c898848e6188b69bcdbb7d14fcc780ee5-26 deleted file mode 100644 index 79651e1cf3f584b7eeefab5732bc0c7c0ae2c407..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_tvyhCBuai999-1x5y828ITP{|!L0=0Au66B2o{ PKmjF?Y9NBjpeg_WIzAej diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/c2ac55a7fb702dd9a527b576d99008fe9b4f376f-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c2ac55a7fb702dd9a527b576d99008fe9b4f376f-14 deleted file mode 100644 index 2cbdb3a8c8b69d9f3b2f1ecb4d3ed30842ee3135..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36 rcmZQk@|Ey)U|?|4^5tPrXB1#yXkf@MOD#&v$uC#P&r?XwNGt*XeCG(o diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c2c3d29bce8aae89fed326832b3e1e1077cef1da-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c2c3d29bce8aae89fed326832b3e1e1077cef1da-18 deleted file mode 100644 index 468e64850cdb63c1eee4d75ff6776a9e79e0b0c5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 61 ucmZQk@|DO-Y&o%a|NB@5Mg}GZh6V`+ULaY6F02F*{{O#$fdNcNb diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c321670bbcd985327045dd1468bf2ac4ae7333e5-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c321670bbcd985327045dd1468bf2ac4ae7333e5-7 deleted file mode 100755 index eca662b7694b7094998f4deeca9a3b1b901b6264..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 acmZ>Y$}kZxazm)C~N?DB?!3y diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c652c46aba3567521f912bae6dc263b668c34c9c-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c652c46aba3567521f912bae6dc263b668c34c9c-7 deleted file mode 100755 index 5cfdce9b9dbf45290fa1a487d46498f712f4c05c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17 ScmZ?L@|9o!0w)N4jtKx1(E@V- diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c6610b87900912d462229a5259dab51ea0aeef33-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c6610b87900912d462229a5259dab51ea0aeef33-4 deleted file mode 100755 index 7a1dbaaf..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/c6610b87900912d462229a5259dab51ea0aeef33-4 +++ /dev/null @@ -1 +0,0 @@ -B*M \ No newline at end of file diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/c6c37f6c89fe55768f8b3f7b28b99467c239703a-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c6c37f6c89fe55768f8b3f7b28b99467c239703a-1 deleted file mode 100755 index 8c206a17cf90bbd82e1cbe005ebcd6b2ff77491c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39 ucmZ>Y%CInWj8qGb>{?*|jDdkQkU@ljfnf)O0)rCQ!*Z42R!)vp%>Mw;uL`FC diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c71abfffdcf530a6d28fd99cd2c3505c61ef0ac5-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c71abfffdcf530a6d28fd99cd2c3505c61ef0ac5-8 deleted file mode 100644 index 490ee245d6b8ffc19f7048a59af391cba9e1552a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26 ecmZQk@|DO-Y&o%a|NB@5Mg}GZ1_m&n!4Uv(4hQA{ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c77304b250e887b39b5447d19b9c106fcebe7e66-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c77304b250e887b39b5447d19b9c106fcebe7e66-20 deleted file mode 100644 index 7f1cf184c1e69db74b083f3983b559de0cf83c23..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 71 zcmZQk@|DO-Y&o%a|NB@5Mh0O9h6V`+ULaY6EG!EYPyz{qW&Zzf04V@dU_v4f01K`b A@c;k- diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c78cd8530e6d8a606a28797552ce3f5494763621-25 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c78cd8530e6d8a606a28797552ce3f5494763621-25 deleted file mode 100644 index 5140f6f62cd9d09079e740107343de9fc9513167..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 zcmZQk@|DO-Y&o%a|NB@5Mg}GZ1_lWRULaWm#6Wf)6N7>&NT8txSsjLeEJy`N9a!f7 Y{{{vIqZ%;v|3Bp>+v6}9Xp2N10B_SaNB{r; diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c790308a65efa1b895bc57abe53e4fbcdb2b7d0e-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c790308a65efa1b895bc57abe53e4fbcdb2b7d0e-13 deleted file mode 100755 index c2479e6d87397739554a60f77c33f5f123a60a54..0000000000000000000000000000000000000000 GIT binary patch literal 0 
HcmV?d00001 literal 88 XcmZ>Y%CHbGaY$}ll>j8qGbjGmAd!oVQ*e*az|isy|;gRtZq9N3i<^jSg}?BCxJP+*wq9P$wW DIrtTA diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/c8b01a7ea9c1b84e4ee5eb68121c64f183e7ea10-9 b/vendor/github.com/pierrec/lz4/fuzz/corpus/c8b01a7ea9c1b84e4ee5eb68121c64f183e7ea10-9 deleted file mode 100644 index 56aee0515cf853d74a223441a2cfe163bb19741b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 34 jcmZQk@|DO;Y&o%azuNm)21X_Z1_lW~FqsD;>lqjTpBe`O diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ca5d375d8a66727221d3e198d4ad360782944de7-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ca5d375d8a66727221d3e198d4ad360782944de7-27 deleted file mode 100644 index 8c22c4dec7c989c754f375f9279ab19147fa6911..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKX9_c0tJ*n!XTNN|L^zj1tLbEB8LC}!6q{SbxDBDuK^hVG2axTT%ss7 fEhja(L@zkC1gsy~Ng$K$5hjD&W>f=qBGjD#Vj4A9 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/cb1314cc880a1a389cedf5c16cc4b8ad505b4506-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/cb1314cc880a1a389cedf5c16cc4b8ad505b4506-23 deleted file mode 100644 index 29567cd5ef6f04a0f19a24a897c8323a870bbd57..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 105 zcmZQk@|DO-Y&o%a|NB@5Mh0F628Npd{~H>BG!ugYBZDvlLxY5&p{0?5rICfDsi}cE fP=*1duAv55qbyKB2_y`b0h$2hPX$w8LLv_UsmC2& diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/cb635ef244cb6affc005c63d0bf8b52aecb1d986-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/cb635ef244cb6affc005c63d0bf8b52aecb1d986-4 deleted file mode 100755 index 9206cb93..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/cb635ef244cb6affc005c63d0bf8b52aecb1d986-4 +++ /dev/null @@ -1 +0,0 @@ -"M1 \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/cd67bf90feaeb1912792508afa01a09fe1f044c6-13 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/cd67bf90feaeb1912792508afa01a09fe1f044c6-13 deleted file mode 100644 index 71ebffbc207bc13b8584d97a7d8a4e9727e3bdba..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33 QcmZ?L@|9o!0w-KF06@nBhX4Qo diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/cda434677d4bdd969a3bbf84086349f821e39c80-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/cda434677d4bdd969a3bbf84086349f821e39c80-1 deleted file mode 100755 index 0d66552a94f9755ba775f3a5a30d1ae566e55710..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmZ>Y$}l!`j8qGbjGmAd!oVOV-{8Qmq@d3d!eIaYj(`HgRA-*6u8}Km#wz;#n520K E07-EVlK=n! diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ceb22e7f581d85ed876e3d61da7df65da8954bf2-32 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ceb22e7f581d85ed876e3d61da7df65da8954bf2-32 deleted file mode 100644 index 3f4ae7812cf5e5e3b504382b3293f8caf64784b9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F628R6qK!TBhiQ&(GM+RXa8z`8<&``t3(7?d(|38qB T$dg3~gGCU+|Nk?;04e|g4;>iC diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/cfe7201e28d42484764264c231663e6372e95ef7-14 b/vendor/github.com/pierrec/lz4/fuzz/corpus/cfe7201e28d42484764264c231663e6372e95ef7-14 deleted file mode 100644 index ad5308bbad6312d84f874040858f22b93390965e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 jcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^$N;1O$mR}X diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/cff88dd94ee94e1901d25a74e29ad863bb78b1e4-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/cff88dd94ee94e1901d25a74e29ad863bb78b1e4-16 deleted file mode 100644 index 50ebc75bfb4fdb40ad07fcc99111f961cb8cdbca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 lcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^&cG<_4gkY?3&#Ke diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/cffc7573debb5af80aaddfa752538825275fd6a9-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/cffc7573debb5af80aaddfa752538825275fd6a9-7 deleted file mode 100755 index cac35b69..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/cffc7573debb5af80aaddfa752538825275fd6a9-7 +++ /dev/null @@ -1 +0,0 @@ -"MM@"©+[z_ \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d0ae058f71e53a7afd648b859cd7485886be550d-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d0ae058f71e53a7afd648b859cd7485886be550d-22 deleted file mode 100644 index 8a7600c2c87c4e9934e84d8911c6a12cecc93589..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 127 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=QgO3 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d24f23a23508dd6bc93ea6283ed49c8ba4b737ed-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d24f23a23508dd6bc93ea6283ed49c8ba4b737ed-15 deleted file mode 100644 index 3bc8f21001899a43a7937c8ed3e35f33d458be5c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 49 VcmZ?L@|9o!0w*-u5{(a+2LOjy1t0(b diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d295ca4c78f7fd3ff10b0520b09a0a346310e0a9-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d295ca4c78f7fd3ff10b0520b09a0a346310e0a9-1 deleted file mode 100755 index 104bdc3d18f289e79d59ffef745a011a450b8113..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 58 zcmZ>Y$}lo?j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C{x KvzUFH8ZrQ#K@ArG diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d3ddffcd038a5646a53d48b684eac5b721c7062a-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d3ddffcd038a5646a53d48b684eac5b721c7062a-18 deleted file mode 100644 index 0702c4f88e08ccafe37fe70250000fd5aade110d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 59 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=PC$0HtpWnE(I) diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/d4275f1f814a5b24f7b4788d15f3fef7b2be8aef-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d4275f1f814a5b24f7b4788d15f3fef7b2be8aef-23 deleted file mode 100644 index 7405bc729cc0462e338851792d70ec19bf4a9605..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 72 ncmZQk@|Ey#P+GZn|N8=^=qn6Bz(TZ;Pi9F;PAUT<14AMJJZu$u diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d57eaf0fada8726afac2287cafb7720af7417b16-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d57eaf0fada8726afac2287cafb7720af7417b16-1 deleted file mode 100755 index 50220fca..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/d57eaf0fada8726afac2287cafb7720af7417b16-1 +++ /dev/null @@ -1 +0,0 @@ -BZh11AY&SYà \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d5c9dc3b5b4e71d902fe4cf5c44b237b104a32a9-4 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d5c9dc3b5b4e71d902fe4cf5c44b237b104a32a9-4 deleted file mode 100755 index 5a0cc7def8c4e585e04e5adcfc52b647a09c47c4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30 WcmZ>Y%CHbGazQ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d7855c38db11bfeeb474a4782f1ea293192f786f-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d7855c38db11bfeeb474a4782f1ea293192f786f-1 deleted file mode 100755 index d4bba7dfde6e5c111d70903c50390f914cfc4c27..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 xcmZ>Y$}lo?j8qGbJf6@R%)r2KCx(Gx0Rw{qgM%Ui3kw4WLxTc?A%lSeGXUah2w(sJ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/d7912c5e2a776c408e7640f10bd7d655a6a0f31b-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/d7912c5e2a776c408e7640f10bd7d655a6a0f31b-27 deleted file mode 100644 index 3df2af3fc89b18264e42538f1bf6b5eb08e2cc24..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%Y$}lu^j8qGbY*1h@>PYEOplBxOsA0!K*z-);;0ORZy 
A4gdfE diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/dcb49d3d45d32601fa27208cec33813e03ff6179-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/dcb49d3d45d32601fa27208cec33813e03ff6179-1 deleted file mode 100755 index a2dd20a4cad4ebc32b9f00cbf96b41c2936ba0da..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 47 zcmZ>Y$}lx_j8qGb>{?*|jDdkQkU@ljfnf)O0)vuKS~Eklyp{`Bxk_*=C&w!0e+d8~ Ctqe{8 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/dce9966b94744440d75a845a48c806041f5a6612-3 b/vendor/github.com/pierrec/lz4/fuzz/corpus/dce9966b94744440d75a845a48c806041f5a6612-3 deleted file mode 100755 index de247315cb6fba307228d83d2166c8108552fe24..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31 ncmZ>Y$}lu^j8qGbRA4aVVK{Oq^<3rd^lSU058x4wEzGB diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/dd92516fbea2d0f96abc78f325d731053a451e16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/dd92516fbea2d0f96abc78f325d731053a451e16 deleted file mode 100644 index 5f62a794..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/dd92516fbea2d0f96abc78f325d731053a451e16 +++ /dev/null @@ -1 +0,0 @@ -ìª` \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ddf986569f89016184b5b6e924d5ba827c9980ca-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ddf986569f89016184b5b6e924d5ba827c9980ca-28 deleted file mode 100644 index c80538dd4374026fe627ee8dc69aff393be82111..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 156 ccmZQk@|Ey#P+GZn|N8=^=qn6Bz%p2b06T>++yDRo diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/de0acf1136a1e05cd27345ce135ea26abd32bbfe-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/de0acf1136a1e05cd27345ce135ea26abd32bbfe-18 deleted file mode 100644 index 60f4c4567f35494ba04df43fda31b6cb6570f2fc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36 
rcmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%#fUsm}jG_z`y_iZ%PK2 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/de33e3ef8a5780c7d3458188a423c00f470904d0-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/de33e3ef8a5780c7d3458188a423c00f470904d0-15 deleted file mode 100644 index 57de944141fb2c859d84250381bd99a6070db3f2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 ncmZQk@|DO-Y&o%a|NB@5Mh0F628N3N{~H(>B=VRT6j&tyt#k=E diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/de501127da94246b2d3aa947637b49fbc17d5e47-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/de501127da94246b2d3aa947637b49fbc17d5e47-1 deleted file mode 100755 index 1d6eb7a2..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/de501127da94246b2d3aa947637b49fbc17d5e47-1 +++ /dev/null @@ -1 +0,0 @@ -BZ \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/de702cd20caeb08a843e0c09b0ce87a74e300415-20 b/vendor/github.com/pierrec/lz4/fuzz/corpus/de702cd20caeb08a843e0c09b0ce87a74e300415-20 deleted file mode 100644 index bf568f6f4853bd8fddcef874115b0e9e53684961..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 71 zcmZQk@|DO-Y&o%a|NB@5Mh0O9h6V`+ULXlVEg%XcEDPi*frLRaHUIyEWFZuoEs+NR DdC3!7 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/de8abda1b9bd5628ca99c8f97237fa885a857bb5-19 b/vendor/github.com/pierrec/lz4/fuzz/corpus/de8abda1b9bd5628ca99c8f97237fa885a857bb5-19 deleted file mode 100644 index 064419b0cea2314dd9a997d13994314094564932..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35 ncmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%d$0-k diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/def6a9e986daf0b268ef29ef7e821a9f6840ef2c-8 b/vendor/github.com/pierrec/lz4/fuzz/corpus/def6a9e986daf0b268ef29ef7e821a9f6840ef2c-8 deleted file mode 100644 index 61307ca8277c078c79268eccd91620319bb2cd15..0000000000000000000000000000000000000000 GIT binary patch literal 0 
HcmV?d00001 literal 20 QcmZS4@|9o!0v04101=!4x&QzG diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/df0768cf0c709a1ff1a93cc0dad23979501c54ff-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/df0768cf0c709a1ff1a93cc0dad23979501c54ff-21 deleted file mode 100644 index 07995a45638fc772cff8fd750794098f82ee7502..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 115 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=Oo#0PGYJnE(I) diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/df5bd5044e9b74c648b5f5fcb4dbdf953175f9f9-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/df5bd5044e9b74c648b5f5fcb4dbdf953175f9f9-27 deleted file mode 100644 index eb27e25eeca6f2ef6d53c1bf7864f149e6c5be4d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_tvyhCBuai99BT|BMX6K!N}N8yFZGYQPkjkjRq- O3Mhe80})gPRRI8o?;H34 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/dfad565009b0667ef2ee10ea9c1286ee5c3ce6b2-1 b/vendor/github.com/pierrec/lz4/fuzz/corpus/dfad565009b0667ef2ee10ea9c1286ee5c3ce6b2-1 deleted file mode 100644 index dd3288dd..00000000 --- a/vendor/github.com/pierrec/lz4/fuzz/corpus/dfad565009b0667ef2ee10ea9c1286ee5c3ce6b2-1 +++ /dev/null @@ -1 +0,0 @@ -* \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/e.txt.bz2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/e.txt.bz2 deleted file mode 100755 index 65bf3b4c32a4742140c57c59ed46a06f859f9ca1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 43149 zcmWJsWmpr8_qFKmMnaGd>68XZ0qO3}0Ry)Ala>aNhEWPicW+8}$AAUhY@?_0`u_j# z*Yn|ix#xM#ea^W@%idQ>TGLL@(k{eHL+{bcjEzU3w2vPDZ^xBT@?ZnMsQ_$;p)seJ z2lUKd-Fo@?F&Ki|+6LjfYvvrjX z+JlKb>Rk5Dtb7u)t2m3A$N@0K5vWCE$DxbAJp6)gfBc z9~#bMt{-}!ci3|5aS+aguuedW;v+Hl4-GckTm#*X$IyFR@QsW0K;twv22DUacjLoA zgIL{uj$a&+(EWun=I+b_}*91TwG<0SgWZe!yNM(FEWCz8@+2-zWll{Na3CE#P?id;kPDf!xFJ zJt_m+&LB7vg6(d?-r?2Z(Gm+_w2w;gc?^!w(&OSq7(n8V5Rm?L1?c_5*?${)PyoQO 
z5Ot9MfAk#~Bme_Wad(hi$bbA=!*}nyK<*Ym$B<+6{C!=}fco7)09p-##rGfGZD3() zM-Ou-=x!4b`~ZO+hk&rEn6>_UG-exwY1($`>ANRD&RqaT^AA0U@0bT5{FpdBP|E0Tms8|M~IbNBSf8v4Q_%*ESH3?GgKz>;FVT9~_1q;p;O~j%%~!DsL{f zi|m99m5N01@R0R}xb*7pDMR*Z&!9}~$8x=^cfMM${FfGlF_lZ38$EtJ5emg7+Y)yv z5%(>6Tk0YqWZ&Ed!;a2oxrV#Km)a@Tuth#P4i(R%LC<*Hob%h5f3d8;5Hwx#(|v9z zw+CG%jczBaHN_y~LhP$5k#9|oV+jHhK|0x>C^k+Z!)dS*{BS9rHhkpkhDjj_0%cDQ z280GQ&B-(-7s2isrV(3u=y+Z4PG6*@=CK?BJPuOAQ+ET4o0_?g7i802^Vl_e=%7 zWx#M;7PtAE`j9BwIx@`k9N!CYh3(Qp@ZSE!>^gjOCGs~Z+4;iS{DpHvhzY#X^kdsT zaJn3`AAhsC0i$qI|Wf|{)ef6fk=nOs`$Wdx<}w=esbb}=Vu){uCD$GxD6 z)f|qy-@~v(P7zEF_!-~CH8Oj8l^-tm=-)SXeH1&bE{^(`C$8#RC&fp9bJNq&&m#X( zrtD(L6KatMm)JOsXU=@*E!P`U9j1nt(*~4Fv=4aN^t$uw+nvS-l;s`Br^Rzat%5(R ztaiCEc{r%l5W4!#n9CRyaD52cy7 zP$i}%Ji5HD85~CslqZH>OqwUk>-(6048LFYvw_iH8b3O4{)Qh6i+c}8Q753qEn1yQ zi5Yt7Nam_DT9Im~t@P*2fEvXz0H}`2`RsEGW$CdFN%Z-JsBhV2D8@02O7DBu%&KF| z`BvZWoycKf>tm8F?o0iEaIc(Sw07?-XM<-T4KszG)Q(r^f>@)?48ro{I1uu)uYQ?X z%A_MNqXmZCMx+v&GA`aAW(vg5R6SyYo#)*iAZ>Ws$c?oxQA>|xAe9=fD#)#4Ikr0$ zX>}rrPxBvvUF|ud^0IVxtuifJI^1Sm8RgU}5vFe<$n20Ac&{DeR~g7?zruWn!Q>ob zK>%l%kpsmi!_6ehbsuJDOn4?r_DRD;v8M#8_oD}9W$`o9@OPbpXoNXg>lU~&=o0B;?LB#3kVKmi z70=PV@foo(mmj}byIpyIW8WBief#FVTecRwv|G0Ijg9rJ>$@zd+9FzP^DKpQbPZ0r z7wy?%y2IK(S9UN0aANW>B(#%eqWZT4!w8#Yv41Q+aIROzOk3?k z4TQg-o__nQqoVYLLPr&2xaBg8?$UEW0Y${befo+M^JIS%7y_WeQtChJXyd%b-X52m z31QI2q*y=$d&zr`snpjYnxiI-TI3RxSn?C7vy56UsaY2jk;mpl`=hV3UwESZZI|yi zkJeB5$UAcOBk4BVMY2h&y%oe~Am`sgf((cC2=%5HOY}F4FJINk3@?&3G!%{589igm z56VikCB_%;cO=S$zPe&C(D6kpivYaa+^i>fzjLi^y_F$qW{DhA6mbH4lV2&He-yUs z5gagt`e#qRLi!7zpu=X{?m8b##hq*D&(Wze?ls?31M5BV^A-1ISS^L4z3W+-qYOIz z1#?lC_dSiha?4`63tHgQUURrhQ(roJ?x(3FP$5r&F_mNXTSo|2S!@k(g_+ zquN1ZuD}h3X@$e+i3X1K(D3DR9my?zs8MkpiaS z6MP#e&a!-Hq%592_xr*2jJYW4o@`2ctlPWLjsW*-*chtW$Pmj&M1`JV_=rITFTFR4 zyflZGj)x;>(v`bqzxi5wHOU4WW($Si?d82!yw$dTMmpT=0c zXgx8I|94OTBioV0eSe=yRG6aS$0dDo1VeNe&UMvrL%#rii~7Xz93#BbpVRoOtO%>H zvPNZT+o0#zm9O7fOAr$*d+lV`s0VWQ!FZ~Ep?5!o(_&asELIT9v8W>r_TFIGZ0ff{ 
zA*SQOs7vNQps-9oCs9e?Iq`QBb;0i@KC{zfcS%^KT=z5^GqMX?-*#T^oQQZ=4Ekal zgprwD;T8r@g6E_1LwFl@>kC5r<#&=Q4a^aJ8@?z>KV_?-l+9f9I!xS~dMKC@K6B{2 z3VS?Kxw(9M*AS!k>YN-%4gn}e9N%0utY(Z++>P%EIV??q{6+#H)oi|ooXV^*wFd|= z>aLFN=2})^N>NR%RW$)Y@tTMpIrlc_f>=u}rj+~jbJuxg!s9Vdt&xyA+kYj8e?^J- zyjVUcaqd$WEw;evBdCLR-MY)gc!S_09e4#qqa>Y$RPkIk{)5 z6`m25Zf;q7f76sA7KWC5&*SP=8MLaAN~y8un6&|z&GQ_rjPx7GUG?|-v1^;*8)`(1-vE-pFGD;2z0k+JuE&sbG%JF3!@f(9^Q=(>pGkV^2la$XwjDR&hE zg5m@LH^r0MU)k1(JWqU-^`j?G%POf=iG$Eyj(KxWFp&f_jHaz9`?(=jBbLu*!0MS< zMDM+`7_Jg_mh^#O^CBeG`L^s8ciCx!3QMotw>S?X`Ofsb*+bK#lJE0x@t8Kfb!ySk zj3AR}aE}dYcR_pRv(Tq-8Xv%ScQH~w7lBQcD7iI0-7DDC+&ab2Uwf)AWU~VgWh9PN zptV}82yZ#4;N?zjU|xHa(Z|5!JYKu~no-rct)tUVvKMtTc0Mfa-}QD6iY7{RJJ*f^ zQ!OwH&dDq-lGz$d++2`8PGa+(3g%Qb74|nbWHDK}@Od7GR{p*{rDGZ^zV|KRVhR|m ztu~4z(cR3M8~|Wcc21*=-m{z_wPmx0*FgjjTh;Did>p)2JI7~B&q3P zeKL!l&+4R`H~j7MHf@?iFXMwU)D|SV8EF=x?vMLQy}hM;J(W7buI^vbki!ECR9lq$ zhkzlE>m}d^6HC3x16ZYow$}Me@z}Dw7rVWbWe1Uwn|Z5*VjoqmJwpRYWfsR3i^N7; zia$3U?r`&n&;Mkw&@s-EjfjgVO}LUX#wnqCWgu@Z`Tl}!EL`&E;c=J*kEo`;Io@yu zVHkBi$X5SLjF3u|3F!gvF8T5Pu^lPz&z}q`2p+t-m1{<_3~U@C8xq9@Tbj~UCW57n z=8qgpc>1eY=Wozy-U6HMDzAF0#CJt!(@(kPAF6N-g{{xkcS^MDI!DR~QVFNTf{}uC z^(cFv_~Ldqr%W%`6=Q>4YJDBAknnTLfw^~oE<^hwI#sX8LYmEpI^xV&!VvAiRv887 zo>E2e$1i-6J3Wo;$qw(Yd0LDDjCFudUwq$^{CM6{AKSU8u@Lhh202sBlV=pRQ36FK zPE6BqI##?gBYj(?tu+UQcnqjUzx}%#&!%u(qaqZ=uhSXq#XDKIwJ~?E)>W-fe#o)f zwsxns)#Rm9D3<_Qkv)`~lVclqu^8Ay2?3^$A{N4$~}=FL4$|8E)|#~w;||^9Ef)5M7(oAXs5KL0wsS+@2E?x8bzA$ zl+v4tfmQiP+@tXLa%O5-s8XI*-zOelp!}!M!$nVyvp+)MI&V{Tnf|G{04T{j+)%bj z%tT?LzVD}<9v#;zmB-JP6eb?A_gN}(f9Y+8|8$9!d?g|wjd}o%U;XObdm(7D(1h_a z43Ox(BWra5haGj4UmnbDuI#@B7+qLIcmR(;t*%Hotp?^uKsfV4)hkO&j~W(x*WPt*V5`|3#}chM(eSYhp`DgI{;o6=A~J# zo9wAy{*aSg#|Fc=cuBy85nd05WF1#pkCn8$>_PPQ_6^$>lHSyYv~qr$6`8 zTtv!Mb8HBI{BvRf$VwhM9D%q1Y^nV^mjz;H za62Y&7&MAWbBTOYavGPh66&?^r10YzLFq2Q`>D5f#V*NtHWMHn1g_uOMOzisHu)L{%>BvS(I;L#1~#!B8x&~8(%h(+k9r| zMR5hcHMpp=gKK%-ZtGgP(fben{W*LxWLKIOc_HxWRqSi$;M)d7z|UDY@a+;W0Mlx6 zfV*S8M3`1SH-gxzG2X=61NhS#tDb{2%b5`hrFN 
zq}yB<%Z&dK^4$X?Zzf+;4E&|Mszo5P^7BT$R)WuJ(|aU{UAs;knyKufzIC(yQ(+}o zN_bqrxh74X^7}J*cHl3F=;6im244&nTOag}4eqhUjB#-p^a(xHr`bx8z{&1Xy{2$x znt1;_gm{U;Vt61>1Cde%n6~M0JL2y{DkjW%oeLxlqpJhg^Jn7e5!4Em``tSU289=n zI(NOBc-N8QZBBK19GZuzQz`Q*doc1!i}Vj&jTUXx6+3727kzBmE_f`C8gXs@lT#srypjDjkqNO4p=3DYu%_g_4d49f`YvK)N ziiN*=4}L%RjCvR*jZgX@_0KiEOOB(H-u2cB+~*%{ZBJNSx@?1BRPK{BxfOU{7V2f-GB& zs=Hir(DQa3>QZCzCyOpoVv@9nnHOq~?W2@(#&LAJ4(goa&bd-PVn|tr&ygj(Q$EIQ ziG8Y9g9V_j>%*cpolo0tou-AHG;Auk&mr_ImNQP^s4JzG6qs)8UNq2vo)|_0@7#Hu z*8PDUU+yn<9V?^2s1K&u!ag2ROp>!&fCF@HPieB6fwEFD?SKC_KuTjUy@`jr-YBEt87^n+(L!d62fHAt*XL;U zi#$a2RpJU#a8HtlD&wZ^g#G*5^`mK!c++V>;WsgD6C*{*po@gr(POdTEPkr(~7|+x5V<_na8VwvA{;ebE zD6Cb?zvWBHB#ln`^EE`_i}}yYYKdk(d4<$3dZIC7tCjLXqacYH)Vn=`;%`u*zBD;t zAwm95AA=ZNc#MpPM&_eJFe|WXS|$T~32F|2{oHA}%3@;Lmx5}}jE0}FsyMMZt|@F~ z8$vfMVZ;i`IWEENVdI-tX6^O9JU6N+)?iRx^6|!JbJ*gWqQ^e5T|*aV5@tjfdVa^U z;_5Hc&ZX{d7Fd8_<72Sc5rnBwg2M84pYy2ypTdEkWlbZ*F*P42H|S$;Hn;`|1JOYm zb>KXOWf#sTI<;-HGd0P-e5dAez8Z-zFrvL)>x3Wu5Y@bA7fc3|^&4@Wy%=Cag6HPE zrXqjI!y+_0Gmg24aDmggg;6r%{+y2@Sf}p{_^SEV?~j&e3rvc#L5iW=SbYyxLdrXP zrw)GkCd=f&N$|r$5Ys%#Vv@b-!3m^JB@}(#*(9(o?93e+P~6IRQQ~L9&R24Az@R8n6-rmHAgBA2#5t_-@6GTb@ zaj7zbAX}{JmbhIt`IkOB=@#dxaoa0(nC`<=%+q3|B&mou}{JE<>GoB<$(fRM(6Hz!Kepge|Tgl!M8 zf#cB55*A<1Ra)3y2w#&ju4bCgZ{xX!o^N*UH50J_)*ADWRIm=G6Q1NSM@vd;Xsavs z=5=>!uT1pm`2$5~&u@9E0hiC$>dwQC+O-T`AJTZ7R0-3hvyN9W@aCFI*2N#>&p2jc zpaiv{!F}F_L0suxI3-d;X!jWiw94k}A`tghMS?rTLY zU;Wpk6hIT-IL!Wz#gqzgfI07da}4*o|K;ZT?23B-3UIdJHwDD1V=N|Mku?oV?SQJs zXn}3nr`#FgQ!M+)2ky%M1%@B_;Bwd^I8xTi>cpS&bL=+=`g%!tvFv}$Q$uwqm|Ldm z4-r*=s8vU9d{vw6|EI1Eiq?|^#ETVUhhn1K6xp{<-{?4^6jp5rzQfKqL}k>|8lEwm z-!x_zyM`2ODh*<{Fp_W-P%oe=SkW2R;C#kzPJg`@5+C075+x{s^<^?^kpsV*>Ol9& zBycJupk_~@M&)+xhP$Y0%3fJ^%jZ8pgc^t6i8FsM2Iuv^yr15-otpkT&3=BOgWRv% zPv$HsugF6;%~h zixOxdziH@WvG?Hgo%`^^mr1x(aT>lA6Rqo6nim6AakcGuH&PY3IGAOr+*2h9e02!y6a2t?IYM z9kjUPfjV)l$9$y{TXm&~mwytmg~#14_Yx6AI%i z+sq3TPi^CxeFkM8>|YbRw?%je3u^#$&X8H!i_uO2u<|081$u+PnuUes;MlykkZaxy 
zl%d=~q*nsNSiv2vmTOsjw^}c6@Hd)&4e0G!c3S+>I!EF>mXOr=wd7hH-+wKZR8MU% zPjvXGsYXzR&(uiLs;+#v9Bnuk9l+H*lRbLyEa{-6-lv5sE!DrRDNhvzzXpZn(i2!-%#(k>eIEyxsX(6cn2|Wwvhl#_&O4K- zGT#_51ey84WEc5zY6e+*7?{i<+Ji`%oZ(7|!kPqXupBIHmK%~sUHL3m5?VFlPV*ge z_TGnVB@<{=A5rY5&v>q;2)o|r?NcjXYxX)Bt<{YnbV?~M&m?op+YS>e5AIWNn&|-l zky;y(KnmJxFhxvxa3CIT^#x<5t#Xm47tDoO_x7Y;e9*0a zDHEXf?U_w2kVu!JlF^UQie8qC=G+qdNsj525ZD%Mdx?1;X40Xm-2zf0my0Z#)gAqY#ozSGEQW4#xVrLPmd&GA`MG0-o)yTjE0rPscb$+lql`P!yIr))BViVv)tMJLe5%v?L@=M#G=s+@ zw^whe_qnR$+lCFlA041(6S?QCdpXnK=e_)mhPZ1#72T*D3Ix+%+yLP$%be+4OlbXbq{>w^)0BvXX?|%MI|TvhGj(6lCocnd6LE zfwOCQ-@@H(`o8Ygi#)VrVjxlnf;;;jxYCa#Y9IK5VUAwPNn1JpvpMB_ukeNdYW#gM zR#yGKU&rThRFzk!U_)$hRQZQ{mGJbkDVF4KZMoQEjIZLa2wSn1;U#%sjD@MjD2C}5 zHLN_nRaN-`90U{YpbtJ@v)u3>h2_S=l$-N2v_4O^Xlp*XBPou^tNYzh%69oY)k%qF zyA&+xJ{zTxdgg9qp{}^Uwvn3H+OSin&wi&`k0?G&f3usIt$QQ4pI6|VyE(e4YT6VV zNKS=4=(_C`*B;_v(W`jJ^>a3l*rP)#b6q%C3DNe|P9>xI;i;e>%J-j^ClO+WB_j*D z&OIYO^+PQ!Ttdx6t!-@`-$F1#yKt^31E~le>{K*26^9g!Iz}pOLPo(t@xa(`bGN-R zTveZJPMTk_FBSUkQSFYIypds^%>S6Eg9-^do1^;2-2AF4Y76dxZYWbw z+u5&89ObrP4=ozMl;G>Xa>-lXL)+kDS;H$(=3i)h${>!QOrrmAVU& zr<9!?n|^FK8(#dAJ|4ODJ7b?r0raUY{^|#!8>hxrBe?*^N?3ZT)**h@Oo%@l@>BX zw`S1#bWlY3*?Ik_O}3bgx4S>Z*M_%M-gg*pR}Pm?Uk=+dH;!In-i)jRs;j2XKbuhW z$>1V^Ns3|FZ$fBT`;&nRvoWegQ;}ZwmPu9p{Iw_iUF!uWs!}s&t9G))+z$>*2ul0> zQ@F*sqKj9UkW^9h+&8~8*@`qg$B1do;|B;@zqV>xLgmM+GOaW0@a`U;4RvdYqgB|k z_zz&M)!>LcwH+;_fSaL5Ao?jm{#S_pu!WgXjs5)l=_*QHdLLg#$t$HTEc9e#j~DHm!<=^HZPW?Y-bs1JCV< zfV+au^wbJ$Vzr=szeDR8NcFKL zUpo_~3oZq|U#$M^8LXqzh%2G0^*U_n^M!(wTUZah(UKe0*bZlEUG@V--6Hq<-Qzsb z#zXfu7N9Hvd-P3)E82vz&{~1|P?h~)CF8q0e~EBZ^(jwR;%}>SIr89E43KMAzFFmG zbf)89VzRWexMK5;li#Wg$h?Lv+1DggA4Q{nFIWhd^W()AcY9K;v`Bh`=qTT;j4=mS zdp*^QAtC|v|4k_MR@uH$9puG3qdRx(rq_b2pyfAJ$^9~H=bSBk;ci^jCLmx^5 zy&L7d8>1wZ@-4$ET-ZODhr0UkmrrC9$sWg;dDv~Lv{*HUoOKY)R)HmNJ|lD=8y!9* zr;F!!7FlBHhlkL=Je~xYq5ZoVe?z(8r+UKi5@Xa=?|5<|>!6YY9CgSTpMb|hf(qfu zJ%!okqDMj>;?4s|3RL9|L=`fZIyRafU|Sgt*>$8NU-P;~E;zXhH8(@u6F#?+OJz87 
z2D!OkTS>Q2zx|u)yZrD#k-(in-yyS7yzT*z@zIi_@;=|B@&k2s;9~RQUestnhbEYC*6>EzBjkFJmri zo>nz5#MHb%3O2Udyp?p`be|I4MfYb2)1IwKI_4D!)GCFFTS4Ia7JqBvqs9^%Q!gDDj<+R_l>q)? z%E$6oyNWt{XVXZgy^Vf*qkCO9qAB>n<{@p%7F8)+c5u^}>E+0$P=JGEX}1k=7rAbo zi!*1;!No+jKp)4?4Os3MPC0&s;AqG){27pjV!u7=xpxLe7FL@Z9Fa2qj{@as{z@MA zX46+YvM1htHtD!B8t>ap7{Ipjw5sOdc~7bXzzVf@RCNY+sBC|pTGO$VMB4w^VEKY( z707rB{M_TdP2MhZ-UGUPyB(ns{BMQEB02tIl^!idzSIjaZxyWy2~S5CK^FKxcAY6y5|+? z?cSNKG$WU6tQnW=#9-)2DGs$(#~nL5iNe+l9K+S0R-PPfRzhEuFw!@bV&ul1%>{3I z0{pq9nsf_>Ds}Ha`uP=8E4jxplIV;z9xYHe472cR|AD@85aav$#JCt8U(HAkOn38L_>3 zdq?v032XF-H@fD&qo zB#QJe_ctwFv+BCjAaKi`qC~^iL&`lKOZbV7BFnA{prx;FIDUaD8jEf`+>oM1HqB^D}+r7VI@Igz>8?@S0CA}{eKj#0@-$r_X4Er5n zziPev$)EFifc2lUgbnPqV7%FHbg{kUU5c6wgz zzz)omSdnED_3N=04Ez2J^itJKqDyNMMnG>U(OS z1d-fNHrRK;_wgoz-an!0>d?beIBXZSoE$`!6){Q^;Ln}!Lj>mpj*4A zW80l!+B%PYJcrGJu`vOF3q~%X}lv* zBhh}mRkaBn&qsgxBMVcsNyGXt?Vfob{*Oh$+xL{C7?e^=j|fGHqq?B`0?X-_m$a8S zXGuC6#G3kVDg?qrg#9iF6CG1exjLnd-YxqVUR{-1(RJWXNhB9|A>3fCI%mm`IB>X-iE{0Wdb(sB#I`1NMG<-*sSo45c4jVkqMQy|=nejRanIs9 zt<&4d`CgJ{P?xn3QnkPu$acuM+zl)Ga%m&Y#1qQmFzKPrz;_VAn^p8HX!3xYMD%-V z>-h+iB_hMcUxh(wJ#g^0IBOr#$o2J!@afi#@FdFy_la%wJ7HV-{MG)j?9e;-&Q<#Y z9ZS|a#ao>w8Ow{h+0`EJccAW}+S0B=N;UZ5NQW-52@6mu!hOvXQe++ba)zE$Enbg7 z$sMEs{t?Eoc=gZh|LV;gJAFBJy$WpstV1}qEkFTj-)#+=yzk*!=evu6kw+C57F5wnWKn>5Vl^K>N9Ncc)NaqPP<* zeN-dG&DMSk=xF@kYse%;v$8zgRl;b)!BA6Uk~zKK2gs zK{vvt=o?!thA?Zum$ZJz0Z+=Yny0Xe7qN>d?t%7$5gbQ~!4IyxiYP5jHB47rrTNP? 
z9g&c@QJ|D<%rhx17+^TQMQd)4DiU>=6bOkq53ma4JsCZmNVr8IHi_RB?_JEF z51Kj3Wtw#YM#dd=61saqu|bx$sv2u85tKC*7IwBEfOYEIn9m2ixTDk>m7(MOpH|gh zv^tk{$SfctrP)s{$}=cd;&`^hZjDH+_?s6(8G!HiL({uXS*f7$6{YnRo*J3W zz1`yXCxIKl=#%XbgGQQirpq^6&W+UBf~l*I4ecBz&@SIYJq~X-2(u#VE0YPE0!Z5M z#T6}<9M|5W8qJU61BhW9W?L^yU6!mw4(|B^w{{-3>mq3_shHB8lo*s)I%7MN+D?(7 zYiwK)QgGvkfiZ)?3sv1IrDXEuS!Q2(G#Hh@=mJ^@NwvEn_35@7P>+#(Hj-gVD{l!p z!$*z^%Zr*#f%z<0*Qs>dI+7agMtkx{Q7M?)DR53eKb^X%!akTLDqHEr>|YQpWMC^Gch?&@S!N_8)siNrYBE*e>wO|;`vA-{+M}m^eUYFApW0oIOVqowA8G=FD@@|TlC4B9H;`ijFo=r zsDrBDi#g{d3;$I=?QV1dS3`&zfi}A*ob3ZX3;Cozdh%DDwWG1hLRcQm^5*?o4$kl| zj`oe6#m&IsIAS<4cIexa&;U&*wO7CTsRPpAFUS{d1iLV5O{Q=iaViEMzBd|hsb6{7 zN(XNgRa|nIdKG9&`Th5;QRuKmi%YiHsr@*Rsm2UsAkMCR!k0tv8D`m>ZXqa@Bw*@@Cc)c;y2!%dW2HxkZ27XSls26 z;(zz9l)uP8<9}_Hf2S=+y;rNoiC83(1E7&EahezX<(|LvPfOI?<;PsyMh6wi4pq$m z6NLGb#?5E_UPZEjRn!-qECM4JgYR@DvJie##1G+g$P<=kw$g^j-n123)6u?jcE^(J zq@V>!DmD!4vSw`x!JbLIO1k}sbD-+_NI|&z@fO>rZXKvU6jgOE?0PBW6Es+Rv_NjV zP_4~S?>(D&D`ptRUD&XjXRz24{|iS&rb?jXnUoDKPN*?unDjy!?NA*&(jN$%HN~ZW ze#}(-7-gAwwqH545k*QywRZsEVz(*bj)rW?0_WuoOF=Oy zlCdd+uco&Z>1i5P%%Jnm?0;qRANOwm6#BO0ZlzRXRR$@I$4S~`_}DD&y8kfFhGEQ} z^Ae$oc+H&z;>P)lF~$jn=y|KlZ7~TqrN@tSYpBY3s}%e~-L@?fa#V6T^u%V4@_xp0 zpgveg%4Bt(nme(*lO`YfVcPK0^7{CbPxW2qBui1~847lGv@_joAW~Eq0{1F&C?#TB zeaQIPFec5zej5>y=)2IYmFp%uh4|*90z<*K&4H+f+wp2vZ(^{At=pjBYXPv2gW=J+ zi-(aM$e6yj+XfgsvgK9bF64HDr{b9|O(A(6&c;8{+7c3g4DdUF{x9eYzaWCoBW$87 zc8-)EmG%=-WN|5`pcEfcPm5ddE72g+y2jjtdSY;3m0ESg4$RlzM)b=u&G9$~ zUkQ8BAd9a4aoUn1%}zFMCpjEoSMHWtniSGhGHb5|qo-$RtT({XB}P>3R>t8^u{xNe zP6K&Vq1<=k%0c~a6V*6+ATuRFDae~+KO%PIFt{_fU=ks1_ezi083o~N za4_PhrQaPFXnjm2HTKFahGhWlIurhT>&5TW877RWNh#hV>w-59hHUU{-j_S=XrLRM zjaao$e@8Yu(HLnI))x6x;g$C!md@m}s?&xk)p7Iv&V_f>j}GGv-z(dN)=H>hmdYHO z?S(cZyUT9l_m<`yn@LU9`R}MOJDBiXLgy#^G>vjWq{13mZ+d&S$q<^d{~s5sS&2Mx z)!>80XpYu^b7hG=$oFv1*9_?_y3a!wt%#&3;tz5v*aqX;VACoLJ% zV{_MRZ#dR1mu8@_$qMpS{SZ31sO0y>;h|aHfoO&yu`Z$Orn<&X_Ywbpv`nI@H-oD1Rvdc`ieBwD^Ddw!+g zu)d8s>={U7FvHy7irht2%G*9DmO1-TF1gyY42C;CKK)2WjfHcAJt) 
z6``j+KB?1ONj`6-`7g{MS*x@nyv42l{qUx`n-#aU3kh!dQH}a@bF_X0-|3??L?j@j zKenpO(BRP;F_2Db7nd$R2Am)%yJKP#)GJ{~Km6LO0XeWc>va#X;3f=lJQCf!(7O%s zt|vTCS6L#q(ME!%eqm>od5;8E-=0VQ7Tzd|4gEDU_(3-|m?+jjlaN7Eci}7vsyZK4 zzB;!;(fMFiviI~gD-pwg5$my_&VYP>g$$X>Q4-2e24S0glBpi(lTc9KNQvC=S9T8T zFK@DBMz&PMO)n7FKP-+tz z>VtrtX~L{vPP?{&M6(FZZ1{%xe)1o7Oi^%eYK?H#tchR`m3E8A)?#R>h{sr4d1w^X z^$3o@I$b$N$qv%#*KcE&M%^>w2m5?_SLVaVUxtk&q){vYeXZZ!*uj35fI&^$RC2wi zZxWE&ZB&ID4`kepL6`Ssdzho`!lLVQiWPVhb^hTy=(kWF?h(cRx_A}f{|sbjxn?ZC z@2?-avgd0mohBtUsvx5fV5o4m*%Py%AynsQ(Z#AOwp@Msld<78S-oC+!N&AhPS*J| z)|5U2aDl2I*DPbs1JTxoovEk_I4rWhqePCuBO&zYC&w*I0Y8d5?07bw86hiw^)B4P z-f4BZG|N62vy2Fs8?W=-aEhZ^{gPoni;E&K;aQt+N24C*S8u7;UDsk8gx9L?_q|Fi zgr7B;?iPq!TFO#ge zYsgu1Gd!Wl&OV0D|53JiEd%%btJD&w`#w1EWfpZ=1|@cjO>Nii4QA5XlL?ef8D=WM z_iI-M@ra`SE=c33dvE|f@G9}|IT@bVHA*lO{T*QZ)0N!bo2=$nNx;HJ`~c@^Pw(V4UyJYGNUCUBn=rBmzF32{%0|=fKZJcqioDc>c$`df%*q|A1*`NVp_c$|6C$qiq${`;UAle z`Zq;nO!Kg|hubd8C8ziFVAxN>#71S%l>D4K*0z0#RfKy%{61!k3(+%!+2GSEXq@qp z{VHeZ`BlGEIix3((kLgx6iHIuEP*D~o0pZ(uM2Q?SHB+BHqC1!IUUpknu7-_je?C7 zKh0dMcx~JeqvP%eWH5QW^CmX!rlgZe-W2JD{s6Q5&TQv-9Ej?(>KIg7g0zSEr(aK2 zU}Zt%-N{M5T|cp?GL zIQCu;r7d?tw(=Tc4|qB)C<(6odjFC(rrf7$I8N02D*Cs!aH;Z9F-m2{iQ*IQ=|a%8 zN&Bd#g1CBnu;xntuBu)ot*t08`5E0T+;^#II9lW=pbEBjZc3*ZYhyB3GgjjCGko6C z3p=j)4e@EYOK7kP_0_axrk2*G)E5_tj&))-L-bxWQY*doW zQ!E62-MTZcYc<9>PM9}BSm$ROl(iP|lAo zY4q~U=Q^bsYOnWyqTUuVf6sh)x9v^)kFIUO(*e?V;mxt12lcm3oX?wHPKvfnE%@$< z(WW@VE)9VL-V3B#7eBj_G66GqW`~v*DD9`KME9a_`^2@L*whP*=p5@pvaix8&IM3d@Ubg4r~tn{e~_mTo~3w>UrOsJ_mni}zg1V5K|Y`f?`=`%c&T1TN8muWUc z-$qek(-lmq_`qTf9liAY-WM?!@yW*JdgDz@bK!m)BBh4SPQv%1Fk`)U-+{IY&{;Nt zxgl-3<{tU&Oav&sBn|MUQl{ljmZiZTQX%GJmlX$TOv`%^epR0x5mkt zLSixoP*s?&UVbq72X5`XLCENws&W8Z;w{&v$im$}H|@~5naU$9$XcJ&!SG5nmN}ks zF%$vuUr(x>4JDr*Xj_W@KL9~MzQ1UJ?%cM8zEF8W76WFu%UNbkRpFt43V!nB%`kfR zxVx0_&FlwgJTjw{PqF(ieOvL*eap4J4<4$=IHT(49I&GLyhzxdm2}9fid>0M&^<)yll8K)T7O75$o$DTKQ5!Fkz~4*l6k>dorEy)-^UD_G-&2 z_IBPb*m?>&4{tIAhW(UTo_b#q0t|p2Z4o6!#Zq}9;hwFGxyTuGE#>zkj_P#wNXrnsfeqH66X*7if z$TIeS@7 
z5(#@@k%@yi=u_V;_eu@}?^p%}HnDLM*bVYE!o{yNsCZJx$32Oi_p-fBK|5|`3VFwu zbpvYjPlq#OUd@FaiJuVq$4a@+H)UM>Ti!w5*yh>o>9O_23FkuNiV&XF;tJfdfmG3W z?n-Osn(wmfFlV?oFHOe>TkdyuG8)N97Pxmf32-y*GfRU#VUF}U+eq`tq5`daz#n>j=w%%UjNw-AvSQH21@NF1$ufz)qttK?bE( zy?keA?h=TzNIjX)>W%a(N7yyyFciC%Gm{OVSv+)mW_F(&cJH5HYmc}-$7?8!h&pw&?X8R)P|tc>q=P%V%4VTG1y1j0U}@*B*GIM$F`T7Q zgu-sIJ;ZUHeD^&mnPxy%hjcg>vC0l~zUb=SE2l0(GSiDip5*&Nv0~o~W4vs>XV^w* zQs8xI*z>#5px(p-JQtT`#AwK~INZ+5le=F_-T?A9~hRvo0~^eTnX& zP`8k~qigAgZ|&%gqUeGl4qotDI~~^7b@j--fi1s#+e6CPzO4hV15d-2vym>xSgN0M zxx68qB6JE5EQ=S3Z$^0YwR**uZ(H5Y=RR~%49L}D?9F$4QeBWa zH0=SmaL!vbT!$(kcnt8;+I!CyIqVM8s@^sg(9@p6bB&di9uCTl1A8X7>?K_)T$}}! z1bJOv7$sSiu1!llkuAdSondUeZ5%ebjoJgHrgGq@L^)+}qL?ATTH@!PQts>C(aJ4n z3PL2az2Voq&z6>-cJRoWvNTA#P}RCM(B9Xw%IPj&WkrqgJi0r?&IJ`JCmp*MX!F-* zbgYY|ZhCYqd1T>uQ>zNG;gCZcQsiXft zZKIJRbC55R%*)11RhoI3d)^0nH&t~DN3fxlM~B68*HKhE*bjSb4kvBOv?25OrB9L$ zcPTDAiF-J8loz6C@y>QYRjsYVTf7C*bvH(&N112X5%*%W^?Z4SaXWCm40jT)#dB9q z&r|6)Ie5>c9p}NQA1wH23tTGi>&l`^^$<~D-%&Y@a*^oG5EMDgXVmdIM zs*!niL)fn4?d*Q*zDnm9JHRm$G9j9dk7mW#+E~PQe3hpswa1-4|v&UTRZFE_XQB9=LF+K3R&pmykeaFr&pcgR%&Op7kq|wq1 zt)6tQ%jy~s?{{N~yOUW2E7O8qI5Fchq`4V+Sm)6K#Qs7{Ep!gyyc3oi$qea*RJ zrz2aQGfs77g4W<{M zox-e2AqI@%H=%O+pUzVLuR*I zcARsu--O}SZh0eK2swH8cj1o4b#GJ(>AJo7-3alRJmBEc@|}M0In>QeV1Eif6WV;n0p{!dB8M;#A_fx@(6WxV*)quUDwT>XIuIZ?Uk^`mStQB#(mnvCrOERx{^D>Fr zQfr_do>2$xUM?#)B6LX`MrwG6pniuM?v9+N6zSP4-<@e=M0=eyW*8K>I8N8GAoSqT{@;}L#ui^j1wF7`)WJGyMsw_UfZmt z4mI3c4^zJfu}$zfaGs9$hVz#u`$qbmAzbfCT_$Qr)a(gBK?Rs~D7V1qnVvBpmh~5t z;?GVGaK+ueLl|SyW(-CUPs4`w?lsMxupPOCserf088g8>-tS*#qZ7U+;$-m3e6`iV zK3yE0?%7Jb*fVE?kEWUM;?DT=xyYOY26nc2Qq(P;(g*T(>w)r(06^-3SQB zcpM#*6r9Y)1v7s*Ph&%~eL*XSc%W4B)E;%Bov*h0s-(B<8+p7jW$!3^&vROs92riR zLtCKlX*8Ga>>x@Eyr45x)5F{gSiqStWN>M~voN=;8+|qJ8mCB{mh9QvLU%a32PyCl z?ea=#hGiXkFzbgpG>5VPeBsR|+oJD-o1kggH$Vaj*bR5Dc;x0m?++yW=@Wg-zT;MO zF+i2K?QP`s?V&#{axi|PPgaC!;+so@NwVq-*?v|#J z>su=;TLckB$?+U-n;CVX*%yR7Q@CB>?2YG`C~_Nh7l!(uYq*$?lGFLSu6XY~>6Q4C 
z`Yu_tz?OQ#aJw|Ql?LB+TFyNn>6N1!6(rGz9v)A$cTDM$cX8%74=ur-l~z?|SB}|! z1K%)apAk9p2x3y^LCD@WH-j4on*(0D;9}J@v3!JQ5$vq=Syt6C>$xcHCeKf)FbpS1 zn<(ALa>Jp^&3IBoq2$4vr*{Gm&gm1Uhc(9{l)jIayhd_Z>@2foDeHT%5>u#Z);KJ zhGqyNfhv#4~9z|Ue1er%*|CU$TJu@UXOX|Z^#$XoI4!p;L{CS52ejPZVo|v zIQnyt-ga+YO2T7SRjPpu3GsEsSi%&}zzY;0Lhvh+I(vkJj-t(rt$|sZr%NCytzHRD z4DiBzJ(X2Emu7Nei{=wvsCScx$5#u)^n82VxqD??LuG?g#ybh3CJ#IrMjM6AvEW)| z;!lL~Mh`Uw56~nrlvDC26u!#UKWOfV9Vo00_l(*fR4tNgYrRtopCo4D_qq*HH`}1; zJt2K>qfc+N1KzGKUCj>Ap@@xJ?W(_V*`;7rjkHe$@@;aZQ|0a@t;N(5JnBNIdjiuv zt1|~hsGhnJM+ON;C{dZhi$UJS9l*|dzL~lqLwS`1x)!>SG0gV%dc9h5n>$o_#J3;G zRF_`wU6Af|9GAPh?2L=V^PjmrwPI>;Ro+VzKr5!8PZN~uk98~AI|3%r!;B(j@~@u1 zc}xXeO}7Rlqjpmx~1pI(Q2#;&MRGud&!4Sf*47UD+MH1>{}F6h%K>JNg;y$5Q}@+?Cr=8w!FzZcUq%JLSV@Jg*uZX zHqQ8FW?(+u+t}M$Id=`vB%OwUB#j}2@7~+{k*`Kz6;nSwR=bjR2&DZMF5+q8O~T*q zy?V2iu2~M98T)tj$CdfEp}mVmr^_a#%#MDLNxq34BnzaSma7?Yy_%6$XNJAXqkT^a zX#{KS;&FRCJUdK2)+D@!+*m&Bmjw~jmbm+CClDBOw+7EMlDC}e>@Am*qB&+yLVXcZ zI@O)*_LFFi6PFv$MvJZ4cdxvlm<-%EMnk*SYoUoa^Bl(2%(~rl>}=y|8f@@F^U1i8 zYjMr;tgNGueH=pO@M`w^m2kl3D&AinZq7}h9ChnS2J zdweUX?6Z{aAl@F6=ufT<)zFdbW8DxF$_buh!o-IcuTjf*MVwy@-zk<$R>7&p0IPc} zude%`YEGmW6)0=$Q0{~)!35m!F79y`*sm&tZCNhvI~9xCwT2T}ZkO!COf&AjyomLfv^T3lP2FolIl0P3)a{3P z+luy-eTurx`7#DI!Hm*mgi6WIUalS5P~7CSX73pHsbTt^9U3>GQ-Yb{nMSOPOS>vT z``fk{6Dk+5c!cvbi6qE|h)&qJ#@uI8FyY6;n729VsefJ}FY}0Js(>!Y8 zteVjqe5-M<9nS%q2PQ%F4H4;6c`Uej(IUy?w5Nj^;se8i+3nMyzn5By}mHrSTx@C&sJNpG$T{DHVzHyh+kv^jBG|9yA06G ztod!`6b)s~sa-te={H&09gz`kMN5|vmbFQhDU|2X3@4t+mG1v!g^g&!gsz;ID4^Lt(n7 z6gzekY1Ga@te3W39|6nOPFm*h9>y-2386cTo$5qvej)o$SVY0I-JEQYSQoH5Wc^-Y z>^)oDo0zkRo)9SEnZ2dOu30%ZZ-7@@D@yNcZs_zO4s1R{w~URF9^9$ZhOcbpcRR!i zG4sB`8=1>WkagL!ZgnlUxZtEal?$>e$oS-L%1}AlU44up7oE8`p^f@X_oKws$$s@7 z1nu2A@<{48!bJ@?!$z-W5QnSBc2M)iH}<9F-rCB!*@9ObQTJ@|??_Viux>reEeMEh z8Dp;3&Fh3iqLNZtwakmd(McDDRGb1#UgU`9UbRoWhMrZ?wp*zCNvzdAJt{jx0quIV z==Gm3Wgnj>`}NIRQJW6H;zqbK;hvS;nK9EP^ms?Z)#_Flj9iSoF#EN@Ig1GGxe39- zZar1O%Q465H>KB^+lrgxE9}m@JF>-LRXp<@6(Kl;I&LpF%C7_1y$jL7=S<+ZFnng2 zwU<-E 
zGs?Yv;5>T#_eB2qKen zQ(k%^1u@#n?nE?gK?77->K56T%X}!>S#H?V#qK^DUCqqvtxqxx5R>heW6FYUOwenZ z%*_riW3#Q?)7r4I3c1@k;RZlk)6{EOwfb{D>Il^R%)s6D43y*512y(C6iYX_a*1e~ zHX640ltJ$uo_7~>bBa5S-2aV2|82XS1mqk0aW00v$X)HY?JOWu7U z`9nubrjmwwqgGd&yVyqUN?Sd`lkEs=S^&2?Y3iS)>qb zdnY7fcq^&a_FdC2l#{_{za837!?Z_q&$GP#y-N$+rLs~Rwb)u3SJJ`xR zl)2u#Q0I-$CM;OC8@=oqs|S}=QzJ7u69kMy!A{PAKEpu$Fd2CWLZ;=tJUF>7TN`7A zBq-ELbu6~oybd=*F>J@<300K=Jcq_SwC5mrgEO&OlDdn^)Y6n*_Q_iD zKE~^JVz{m-<#M5|f(1$-=BX(5ZAK{%j~}%Fk*pbtJQl`W_W|D0GK<^3YjZOe%7~UU zupM%nnZt634P;B}Gp*1bNK9IW;44fS6>55q3# z?rQd^7fq0*2(LWd5WdzUg$~3nW@`3MgFI;WB4poek1%%igYDZXv~k>>v}^_>ZNNiJ zfxiGoKORz?^*hN|xt;qYcrEwkd_Q4pZjU_{KhZ@GJ8wPn(rwwqnXI%XL>!J0dUW}<{QE|~gL2P^Eg z?bqT9_Azeet4-(;8tdw`_^a;Q$q;CTGo|{4*}G)qN|jJ})i;`g9nV(!cVW5FsgpMX zzB+rw{ENQ-O$O~xK;`Zu4Znc z)d`O!T$*xCHp-VBz9CyO)Pu5)n{Mxx&@0D)wq)z>L9C*8G=tve<>n5lm%WgS=(Z4` zmsU#b6?)lpd^o6J`l`>zAFSpT)KVpA^-SqL&0_(aM^@v@~nG$MQ=v7q|S&R zDUOhooU>)%Hn}8z8NWH?7((kLg1g2Xi(@O!Mj`>{Zi^g6pB`0WswVv59S1HW5C&YAE|pHKpXV{9^}b8Yjolp;>axmZTXx>7Dv zy{-kEu*c9W7~t?@Uv1g1-J-KDm)(jE=CVU->>jC`Q!a{N@T?dHZ^f%NQDLy#_FLF$ zCwA{NjH}B}cZjWN@@7i*K?NIhI@dk-lgBibqmR9rXisO?-D$(E6D%pKyTe?bu6D$U z>{COo>Feo08pn$~X-N%fT+{C%+kMX&**c-ehIuBOo8pkW)N~m-E#23MFtHKg(HQye zc0qg)qA+C%C)^aP<9Mz+YRP^tR{e2xy<>toccThziw5pU?r@mCwhXfNaD8~G zH?vxqIp3CX&ed;*6l^bFyhGsRy-BkdP2GRVhZ@E=S4Ok5z`=c^N zEzeJC%c9fg=ECKjs{DBvacoCCHqFYRD}u+kkZMoUqn{xOAaIp!Xn z#pt_+tvsH^dwbh@*_|cG#w)eN1h;!1bgi;bX20H_8X&cP{U8_Dt__c2tN}`!VMOzTM~1;VehR zQ&vuHa8VSgXeT$-gWkEb$JP#WL}2hErzkadRi3S^IpvAe=XLBOJwTj}Obawtr@f;z zNUFB4H6O2R!2s*L6I^`jSZ*g{>J+}~xuoD%!eQr0-aw-y)RZ`3?t_a@-QD$M?I7Ql z>(6#LU}#Mki|tbM2E4`8mJz2(+9=;Q?JtaqUh$=s9`i3=mxa6>n#LzmS19CKC3iUX zjq^QJTL4WXlkR%<2;FK#1D2a?HmEy@vVoZwM7 z)*G+vr7_83o|_x}Td2{np0LUBQ@q7~Sxa zBg=Z{wIQmUiXBIf0ioh#+S#1fz1@bnLEY;;`$N0*{~g$_|n-r%;r&UajV+Gd6vCdZFvC2|%=A)D!TXz#LRk$vcz zXN~og-M0r(>6?NesX5QPOO898?2760Zb4FHQIPX+hY!bh5$=-0(MIq}Vmd~c(Ow|- zdpyWwo}1%2toor^TgVJK3*o)aD5X8x7R_q!+B*X|_V^srmm16*ps2-6F>%+iE)~aY 
zO(H=cSN0q+XRj_XCBC;tQSH?@xana6;pKvPTs}*J+m>w02S04D4CoJ@G1?rn)EO6Y z9%CkfDOhjQc6WFmW@ThuO}R_RbC|%!`QKt)bG*ousZ|~oKEhrg>n-x9s|Vtibd1=S zkGmn0o#!#|+q}Z<#mGIf7n&r)QMhG8-QwEq@$7@)gG1Q5a0r8aRNY`L_RQ#5A(4$# zxid``v)FQbRNJ2Up&6Gv?+Nw{j-*##F5bG9CNob70idzstydl({d3h~9MwrrU6o-1 z(hrf~&T=t6J-*^EVmQ!zp8zSYVz$%TY;4oh*M>#GsObw{sNC`K^gwa+r`zK_y|DMj zCps)lVq}ywS%^FaO4##3GK|2E%VEBdU9An{ab&Ek%4T~b+}=eo=d-H3tBO-_;O5ow ztw||dv{jC#?&PDQkn&jc1ghvCVGPL^cE_E|GYK|LO>M-JhVH9_$lb-ia#RXaBBAj{ zte7m?Prip^S*GQiYKSb`JoI^+&zJ&1@^c&Lh@u zu+Hua%9se=H;GT>W?IhTQO3p3i}R0rFpZKZil=CnFSroc=Be|u#%3gop_?MrBf~qo zZ>*X~jI}ZaA1(7=b~85c_C3FlxUy*EJUk0NwAxYmESjDk>(e!&J<^}9@SCp@lwo!D zd9-pj(x;JVUM0mo_q>8m!~S%|uKmq8~ltF5eEFEqfWcZNS&nc93*xd)TCCGmKk^SQG`=m5EceURyI z(u`x*N8K6KCq8_%tfMI3dHBLeCM&j8Omo;0@kuX9{);`C?xS2zI$Xg*(CDYWW>8n}l{41_Lz}9+W z*Lu*HJ1x81M&a&8xRh|<$B-BSO%P|u&8sIdW%1dxS#;5f zRMiY`9RVDXds=UW*N3t^ySz*Y&fhIsX6C^uS$R6gGe^2S@4D{pC?(H9edh_ro$C%0 z4*6V52=zw#mAFM}ItQxC7)6P3U3`P?dEqK9*Y4Mvc+A(gRZCNgHhrGf8K@j8PbV|I zi-_66zm+H9^X!fmd_vLPkuPalx5BzRi|-*EZ)~P0c9Ue(2c1UpK3jyi-(H(mF#;xs znWnfrqC@6Rh`ziw zVb5^@mwU?|#KjUKh!p9^*T=}-cWDo667HuPz#VqDQR74H1e)QWKQ5i`Z=tbn0~HA6 zGBn4webpREypvikdJ9|&Zsf`0vU-J@j7fPxZ-U(N&MgU?){1GuZ@%_HP%U`&Ti02K z;HMl_#XOwF+1O7#bnII$fp4l@-%Rmf$Moh68VlVSxts5sQ*#C^62(^R^XB}0w-Le_ zcW+IKlzTK>wY5jK_;v+sn$BgY3Xn}=^b5FYyVZ{c8^|t~O~R;FpmmO75=#)-oovOp zyKhD2Dc$Y~Q`j7Xi|ja3vw%56DEiaLlit3YfV5YijcSpb2KQ&d#g0nb(y8 zE-E(qweff%>jlL~Zf|iteG6zwiD-{}`$o-DFJYu&leOaBx~2q=pfYX^xQalAJCpQ* z6HZ6V!&F?8ar7@5t(@c8*EeFkT*+CmD)Hr)_QY90-DuY*ZJm&PAfkbB&{Q zPIsW!(;I1?*+h6v9h1v3k0d(V3TWjchicYk;-<#2T#IGrZX{K3Z^@V6aIk`s+RVqxJbIbO4h#~2@LCoo#eDKb{Bed>|*CmbgEH_InN6((bIf-t~leW7U1m2@T z=hF5Bv2to-8x6WN9k-(#kx)3VZ@Fq!((5=WcHs&5PSfo6;#Da+oY{+$z3Dymt!l{B@QULNJd#!K zRQ;o1SV^%l>~--x-Py8z%;b3N2M*}PGR)rGt1%yA#k*jEVB6MbV9vsJ1W6pq753ok zDiB-YVeA?>uH0z!brGh4=j)MpH;US%Qop*5#qKhyOR1U;FtNUOknj+}?F5Q@#!Jb0 zzboVpLOHb^a+cSe?&p40jxzVN*b5*QyQ`t##Y3Lh{SxZ!04Q_E!Cl-%-6HXq9_<`m z!M7S#AlBL2vzT5K*QQ?_*1+nLicLe$k1Y?M3i7R6sCB;X3%1X+4S6As)wwG-a~@Q# 
z=pjRtIn@>ypuXz3&QpPEP|WB~U2>Q*TBn__nbC<9%PI*{iK zO!Zf%C09Ii30~!q?(60uzPSbL51NaOeOgN`uCTL=LB4ws_0xi_*%{+9>~M++J>sTM zLNy;y0>YBzYYyiQt~iTnm#k-9&VuFa8EsuGG?|pQFm&*j%1=zE8#uzUp@V%fipH_V z$+7bR;Bf2P+0)XmwB@@mV}T2S=Q4)p9JMBL4)aH7@=*c+y|F@49Fky&*0->&rWD zqaw^g^mBuwpextj^X>Ti*`o25bIo~<7ElZ&MOCefYCT2cHooD5Zjrp;+_95Gk>=yW zD8mg07@o{24!ZM-WJw(H|N4SSW<6*S!F$Zza zVMEpIPZ1k)m$BnpH{Ka=j-5es6`eLdh;5_A;GVhIk6>@N7+&h@ow8+a7jx41_-K)0 zW1g`nL2IDw5>N)kG4UkjOyXcxYe_@wBR&BO`xR@y=ckXc3&ZR zrb9M1eUCO_MRKrUP922mYts*JbNTX(Ug_$xILek+>Y8H`UBE$6$KF~9kZZ?%Ugh=T z%@j=G?%mGYM-a}e^CoDUbB4=JHy~w%r;L|#gweb4M|9kydvohjW~v;zTPpUx7V+~k zw7Q=f>rLGRnL8De`5n0j%#28j`GK%sXUHXa|P zxy$c(eYpDJp2M0uWO7wjnJRG5bnV3tAd=;8m>8#FUFCIb5a)V`WL zu;mu}JUlulbjId5d@_#E%2Y^s*5J*l?Ga3{m9**P_IIql%zhCpoE2+MW{aTbSDtw| z?g8po2b4^L9hs#+p77!E+0(yN*K=;puXQ5-F|2{)p_=Y?<{vLIpL>GFT_@A3FE|0T76z<>O3X{ zTla6gn8@Osro4p+^G-cOaj>Uk%ms`nlrOtIQq~gcIfuprg*@z)*L&uL&kZ-E{OjKP zvJD4T`ySr|tYA;+i}o&`nNXUtthyra4_zKr&5=ij%#35lF1*2+13A?W;IlbpA6v6i@b#IBnH49!QN%6|qEM&Y zIZRYUylN^qwb;99H=6Diiy|YF+=y^)weLnA@1?+A?kd8cUjaam0Y8(#l04tFPZeiwX$)~DYCTY?q4nC$fRC98R8-Y z*z;wp+czL_MzeKMUjjRj^7qi_<=ee$p@etMZ^i@1&vnDi?9Qu{p_nd4!>g+~AR#r( z;lw9I@d{y$>QtXfy^Wxsd%E_Bqn6r$=D}QUda%#0Q5CLF3b_D0iG#r(10~$?$n|06e$hLVGi#p zN<<#O((K6l=Qj)ZhQ!0J_C29=ymd9Iso#b=;^SBUQlzQdmH*xl`< z4bI8BI@0#=TTA+F=i~1d$3lpR_JA7*f&0ES(n>^hN09)geP-) zE#y6sd7Q058JRJeZ#u$5@v@Ve*?nYIHf+B(Rtwfe5Yd zRl1r!DkvZ+#Bu%Do9R0V_am5StqUx?XVG=m!>6(8|J!sShRV~=&Gh~fZ(AC9m$ot*RC7k zyvLmf_S0C$TERSu-GQ1g>*s>+c#$K|1d-5@8x&FlT2o$rUEdD72X*{iNd`Qwd>OT{@~It4Q+d3rim>+=7h1JuFmlJ(o>F>_Drd`^5ZnGBQ#{KfMIoPh0Ks@vj}gPue+hb6_MM38S-%~+=GFLYfN z3~kl6sywE7`**rQWLZFm3r-7pYT*6O>BpkLAm>P7XHCoQrs>e}XA4u`F8i!zBQB%| zE^TGQA*?^W$A_1)ZJR=N!$R6!;3s}XIV7p=QWj>;A7cZWtwg$c3SkrZ7$oV!2ib;56ks3Vu z_%4yL88Zm3P8LkVA``-qQ&QkW41AtE19d~vlP71vrx{P`6C~fDDRx(Tmx^4Lx5{I}~xH_?4yR*W%DaPpekAIA7y_8WX}=-(79iW8y9zy^uY}@9*Vk1X7z+)qoom_eVR~Dan3HCaeYqO zG1c5)BnnxZQP{Dy8a&j=fY5ekJli+cza;G&oiK|SoHX5cAA8wW4@ohHEWd8PBU4&s 
z;tK9sJUMq|88gUDui&MjHV1)YQA5H;>F2wU^RHSvMv-~=rQNQ+iAQg1c-Ogw=jo`S zh^X~@fcII+f>{ot*-DNj4b6NK%;(1LUf#=LlE)MV>`#za2PEGrKDU59qR9&FJoxr{ z@wj|?XeE>lfW?@Xc*J+D*M%IH5W3q3Reccm4r59*-gvz<;M0XE*G((0AYkR)%rhO5 zYL7Y%D>nH=uRTus>v>)dSmNANyc2WHn>{R9=&!TIE_|l817<$KixSH;WM_SR;JxHD zdz?vl$!0Eqoau0nQ;z$WFyTmLLVL5-p+vAqt7NhOb^vd4S2U{4=FHibogEWKL>}#h zHM(^on3>#x={;9Yi5!}h?J=n{Mm1j+VfSVwJ^{z0vvtE(y^SY`=g5~gBvx?S z4xD~xmd;LiRy&?4+vqYMOZS1^^9UNsCwOFwW>u=2zUS;+fz}(lbCvLNR|XPlvC=QJ^Vp81X@5m;1NX@bvch)72`kB6Nel+EXI4%bUc z+G88^{V~*j%H7KJ!{jrTp{K6u{Wv&?2Nxt`9jM7DU% zk9*rzGGlR3gK>+4op}$W@w?@nEj63xmIn>Xlvqz|P0RXUC1Q>%=^El67W*Qr4!CVZ z%0M0Vy^k&N#7+-V=e3~sA_vNj?yz)SO?ESQ0LXnKYru<~JZIOVL$@H!UGB*CU1=)I zJ$wq>)Px$E`tr5u`*5eLZZPgmrzB-yf(_yUJI)z|B4k^O>ztN#dvzgzBmR zsDRyXE#WUOaNTT8jTsQ$jnf7Pk2WuxN0wmNdP=2CQ)1ORz0o&QggQ zik>;JZynOJcN$l|6W9VPnYGL_zAO5Hq- z-b=H1L6l;zIX`gd1~q|5vcG9kn-B_0UhN+d?rq$M+|O=bOT8YlkgCo0nGZhxDsX$T zSLInAyBvWKc<|leA@TgXHtxXcUls`I2$V*`@kET#{Sh2NQ4?4E;_ zX}jI1g2J7^X^r34=A-10Pc!>A1YYPT-MLdU;gGG^?JSI-?z5zfW zwDxnVK`!Tgu>*q)QX+&Ppci{?*#=* zVG~~mF)N~8=ieBs%#g7&*)^Mwvww=Z@|a#eIcDhvq-f@nbeO4IdFIOsUGk%(Wai|s zm79nqrw&^6>+*8D3GSp%B}uFzXEm_}diF3mo+z;gPCb$50F=n=J1>1|o7#DV&#d#T zELMBB@Va4$7t^yzmetd?X9@tPhokfdC${u;UY>$Au9l3+IMg_$*F0qbR@e)cuTKhE zNGNHj9(Eqw6Nj(^q;Cj4npJ69K8{4YxFbhZcPC_bskxPpJ8R`cyijFt$oPWs>%ca&ImM#r}dyz==%nT#!19uJ)BtFw;f-o{0G3xMSBdtKQr$d5;u zUNxqT44xJd*$;1~`mb|K%L7&Wfg*XC%5~#fkEIChnLVDvn?X|dv|P!TF+!r$(`|17 zfXzGX9ZhAk&5Yq%7HX`eq`mq~br;(+g_$L|M>zISy5WN5ea-d|ZnF2iI(CbyJ~cR9 z3vuG9XSO-Lua4Bmjad$RMO!ua4-_ytD7ug~%z%1xjg3S24rI zW(FzVaMcJs=d;hBJf^}@j#=#~p_yl@a@kImwK%NLpJH8XovHXnK$knfE@R7eRV_{v zAF1tDAR;)etIB~<+~L}9Iljv~UGg=`_h6~)zVu{2A7M|a&W$$p`jQZ9GJMKxeLQ+F zd{vre7np}ay#wT9wigz>Y1GVu64MA?gAZFy0cSeH)bn0F_Z?nr=SUPnMEPKGava~c zi-Xx(f#B|XESRXPLmdts&QSRGoV8ukYc~XRZpwo~wV=*wdHV?gFDTI06Nj_D4IY?v zg12-UHhWcXXCaSFxyQoJrfhL^k2AV-xmm=B?$j*Xn)4MWmf~j=6Nc|+8z{|A zXs2-Nx+TS<-Z**7|5<~%W@J$s77(BRxj=CU zIPxQ_h&8O#9mHc)>hFn9t<-Zi*t%QRs}h8w`39mQw<7NKz&oY#zQv`^@QHN9tn#h+ 
zC$kABk1aXpO2C5k2;n=uT6e8;*~L*6ur?jpD>0Ld3C}k*EstW1;L(6WAd|#le)&D! zedLnGN*iKt!sPT)NH#i0xO=aAszIC^WS;E=C>5v9&|xiKEZ;^lH!#tjqtho$NO|uK z3M9~*cva=MBpXc<6(r2BfEL6fZzM!^Wh$ zG=dErJwlByPNwsx8a?4gWAVITGj@gAV%NRn7|;qn%SR}eXJ*hhoM$C(2vgejkY#>8 zZMx6XWcR=<7Y4Ej?4HeMy3)CXxZWuZ<9QJ>UGJiLS6V%^_@RcLv2<@cBhHubaiI3I z3YO-aJBtapP8`J6YI28jj_7IYJsiWPUJXN|z3&Xuj6k^}yQbwe*6WM2Wb_=XkE%sL zng_Ykn05_g%Gxji)Hxde67P2)*pO z+R?QAmrbd6n_!T_6JfB-^QGY#-RPKfXNozsZ)HF!^Sw@;M?ebd^Bt+1-dK7@ED3`* zOa$EZX-Y0=_aC>$Z15LQkg|Y}mH?(PmDqF!;kkQ*lI zC40S;k#{=Rt{+Zq8aJrTC62s$LnFr@ytVD&ig{?-#|_T0{07)*^;mg$9n(#Fu@WDn zk_=M<+O)2Z8&y3=b566J7Yd(a?wUJ2+je^+vd-a45!-zkWdKCUX1eGT7AJf}n!84} zc*wkiea?~f5&2z><1jr)Y+R*r^M_;d>|==6wL79(eDv|u=K;Rh0KD!%3kEh6?FPJq zrUsC9aI27-OQf)cMP?XvCRPZ@!)@V{Hz(SkQ1eLkY=Eb#hgQd?A7{8I@}(9pUh6Pq zUm>_N4yHOfXM|fFOpiytg22PnxXmjxXF(wxFK*aJT9iABqPKIOIJ(RGqG{Jw8p3kX z%%M>}Pc5!Nn*D6VLh0>3QOl}0XFIw@Ewi30L3yO%%}7?22ArPAf&>qKRQ z(WeGrgG1ro!uPu)xP{>OY4*lCTs#$4Mw#{pr`ztG<}!5=b9`URQrld`RFErp;h|dT zp3)yyD{fM&QddRUmOedMO0n!`v60^vIIu20nY9|O?yHZt)ePyU9P}Jnsz6}ZknXMd zk1qLGgc6BYW$=G_t-ij_z>j-zqIg;%?X^_7(v_esjf!cj4vp5;@b&9A6O4nR?Q%~A z)Oak@>?N-As;tq@@5jS#MSY5vsypJnzev%!yCtUvQ}SZgE~VQZ}M8@H{uyldZK8xFnfZmUnEp9o8(`9^X}Y{1;TmyDDxVu@BRSQzcT1 zyQ(97?C)tn{jiN82L_A1HE_6lOO!df+Ri1Su+&sAwsXkQ^H$#rQO>Y0mZsx!xFf-# zqP=Sz@vdFX@14`tI#$n)Ic5XAU_~4oPm2Z*a?O)T$LjOfum(xvYH78LaC$Et1ffr< z#uZ-CiXqBF)d!8pd_bXsT?s*}lnVytz1!Ynq|L^jYHw6LKX%{DPs6MI!+&HNoOv{A z2uR)?VAPM7lXkGXExU`%CrI7VD>qYinV0DrfIgZ@-Sxs<9ka-@;Mx?}E0J)y$m1ta zdR9*F8*biyS9JH1y)w)YcU{K@d)UcKV$$9)wBy!tS1*W`d%a{qa2`%4 z*ee!Tlb$|vUDd3nHaH>V)$Nenovz{(;DV+RopKF6d(Y-qwbxLC@39UZH%gb*ZEihz zRnzIa~SmZPFtIGbwlj1#@z|oHz z=0a1W??4nhl;@^rj@o?I`xJfdBLSyuyzS5sX&r^76vaR$h?9u;y4GdsBALA3x6eU? 
zI(7`Y_8#ALJ6O??zY7i#*rs-xRE)*t7n0p$)+t5W_c%_<^Rm9dABg6#_g0f`%PosS zqbI$dR2O-NDqZhyrN@F1al)UQ(&w{}oxN*-WEU0J-VibdA8O*`-IuW8d>TgJ3)yDL ze2xLfJj$+Z6vU$O?gnpMo?bCmnWN__&atkv%)aHm>%5^*ygpH>Qr5a-@|rA=r)_2~j%tJ4;m%98ccbvkviXcN`T zE0oob6*U=~$GZ|<^^SMe6|Bm_;fRcw6~}4tVO?lD+yKbF7hwz|WkqkN0I@gV&Y>}- z7WM7q;CUM3>5d9cYMIqKuehDM6)m-?x0kk7h+Zb4HuemWyFDK0N62>-K(mA8*Wz~~ z8Q{|t6t3oWbMTw?k65t5o>=RAj>yrY*BBp z`g>lWORf5N2>mZ}b_wie`Z=g)64Ui&<4j(O-5rWmQrWv9XVyJdQa2ORS9L5-kv<6G zlB9uzMp8h}rtTnfyD}eM=LB1?c<-4W(&q3BSn*Z@P6JqVyU3f0h1TCQ-e&sBZ-e(F zvvDHnG+mv+@2WsBs=YH^@PR^s#)Tc0AfwTX(yjCGmL=M-obL0}JkjN-JRwf417XFL zdrYFSgPhz((vL{I)?yi;R!TUr^JQi~aIlYdr)#D_$*w1AbI9EE5^Y%ZViiw!sUFio z!5YxQgJ!+Eu7v2*;WKZvVLrSqgg0xW8qahwxtw{7xSjFYjQO0wG58bVz^8P}yDEms zCGBU>Hg2r*hrGLceD3s&Y%vdZ?>WkjZpJufJFl@j4i6k4loegWq`QPTxu<(HXKzbF ziIz@>nk4so<2$vUe&w*s6COx=?QqX$u94`oq0rEK3@b5%4VwL!)q|Hk$n~c^$~@|y zXL^&{(+b)ZEvDygLv!R2zP?V^fhjvw)x5;`HS+`0w?{27b;yk#D)@xa)4o0{%?Gf{ zP7DH8G7G`%c{FD?li9_NareZib$dM|Po#?5J2 zVjg3XPzeO6R8u z>uNqB8Zr{E9u{lO!XFKs-WVA6-f-omy>oiGx`rArDbB4sTs6FggBrJJOi*Df=r*~~ z%R^@_-6VteF?FLBwT~Q3zTjU0$aAOPpPcTzLO+ZGG5dX*OGM=z?>K_uNcRfPdWYog zWu5klUH8TT=B}s#fgD)+pL+lOxuTIVH3{F2@gKRW4Vkzq6!%Ma=K4%tvmn>&FPJEvB+n3wJu||l7ZF3b@ zL^_}mwbxn)We~-gJP)YePuQxlw|%@u4}tY+x#Uf=v9Rt2u0F$%Z;dzZ86t)ngeQ&0 z`QGl%WzTtHPWqghU^HTyH8fza-yXZVyCI$S1k8TLYvH(`p0#!`=IwNQ%NJ!iN}W8y zV1!-1=qpcfggbn$Y3Xb_OcX=eQB{WrTdZt}46HSKxY*?57A7Zlja$lSqcV{#>Mn=eCWa{T1wN~a+=~^i z8IQosY4Tpn>+*2Noq1?mAePPsOjwtdCnf46d4*#5+@5R}l|lH4!k;!l;eq=-k9IA$ zHPH1yOnih}^i)`E8RK|nq3o|tyx&Wo9FsOYOKHABhVGM8WYQ?&s^BqAoGt0r+Xij!{qD6gvO_0mw4ueA61m=Tk0k1M02B+a~bJ) zFqzb`qE1b%oVja$?I2M)Qhl+k$13`B zbaw&Nt%-5%knb_xTA7)67dg&$W-Rw=#$EG>jygqlT$5-xTg$b?L0z!#8i3!sW-m(O!{g zeUnttKH0+Q?An=dBqkTYGH~tp7xuXYW_a!ImV2dLo{Y_4R(!fSCmu%6W}`G=`Uq58 z4-OMJB#`7fbUxA?XdGl6d^ta|Q1KZ)T~Csb8EOv~hmQx{zcLOq#$kbS$=9yr0R4$( zuAgTfj2eo~EO;?}Wb91GHIz)n88FpJu+#8>)qo=qkcPySR!c zfW)haWN)5s6|St&arWrG(N45oJ53`+aw+7Ac7xaOMsZ!t*10$qe)w>YiZj@hm)C;PAa6_Ikfc9g@Ub 
zLAZN9MEW1ox}|jWjUICqC$zn*3ct5l4%g@huwt!!F8ask(BFsId0oWp z&PZo<-FjIqnTR{fs3R{IF7?lHo8K%zXCV(tS-bC&+C9|JkP!6sSTyf7P#`QmPd^Zc&7XTK8t#f$Z$#hJ&?WAAOvvUY7{X zr;%KJFS-qN&O@`ImR|59Z1p-q-dsI#68cUHE6pNa%k9x`xV}6R_=xbpZ3qKh+~Q>Q zdL3ogiL6-K$3#ULm~#gqjNS}yUiMp^=5slRl6#$9Gl&;^l4h-|z^r&Yy3Rghra<2e zc+v3*F^XHOK<=HPCix3m(}UA0Hopl@N{4xH?#Lkaz8h9(J}hZPcVBk9%5orTkoS3l zwWgqa4l!#rz&r;3%(%=ntq+=`b*+|4#591+`xmnuaB ztbxySxPn|_+-4k^Ht_?hB8Sjuu87?h2(-9z9dYcV+Zm)%SzdkO1DD zniOqjuXJ4*dzKMN_jliTR#&5vBbp0oT9OAf&DIDj+r%{vI5U@JYr*5fE~&WNw%|`j zZ&d2J?pt(j;mxnR*heYnBn2;BZ#_IUb zb7--A?0Vz3AWZjE1#H1YX0hP~GL9JX_}~mbv^x6MOsxUV+H_zss!K7Mo}*=~ zKC7C?&I3Tkwz4^55$D}xpq91Z?sy^F7*1{vb6_r%yd7bf@tJTZ7zTj(_`r5(_V$_7 z(K1rK?HZ7^bmGUjZ#FHH^SUPgl3) z<(SW#Q(A9_HFei2`tMWSA*09=H5EADK*1)?lQntY9$w9>E04jOrsl453s0r99wG{_ zv+U@+gtTg+P@~bV#LA;K=RU*0Zd~xwldc`LZMN0D@EeUQoW$p`WFvY=I&Sr>qqa>j z{N9=g@?v)OZr1iefg8hzU?;KjgOqk0y>GbkKG65jL+mTL96b8vel_NpJ6Vzn(6d5- z5uPZkx2o*Ks4;1fi$Ws^6!Kkd<$aL?YUCj)uBI6#e z8NDYCdAT@jX3t<*i(G#^e#s#eNNeYk;HW=R-01xk=#Hwx%#(g~XFeB(#hrF?ek?U! 
zh9LGQ#jD0VS?qK5bS*wib>9xnW@R8>$nMV)V(!l#c~+L&Y`dc*JG@hPIdL_~y28!M znw=thUtW6F*wqvv24Ds)71DA=%)rL=RSVKm%$0*2h&s=;N zp|@Tgo18}Q8+@^jxmS|i+`ivpY-0AnN=!&MK1RoQdJdT|9L_z~b#D3MyrUCnAZo|F zz~mA>iC~!Z=EZdPtLJmX!w&3~$q}Q$9|s2Z+Pn6C)!?Tx7H@pN1xVap5wa%eGE+16 z!_(;4Ve40)sLGT#2rc#)ixO(O`wPX>-uk;pm@w;sDhb4@16~I(^;8Yw_=d zsz$}_Q?}I9Nb4f`=Wz0fQk@6UdcNF#33?G9Mz{m0UlHm@Z5on(6^YYl8Jn5m%hVS9 z4P(2ue9w0FYV50rC~cAeGb(+>ttF2$g6FfRUUH2p^e4(-Q&ZSD^P@=V;JPw=19x{# z+|2Se_n4$$d0i+t%{#G;L`rsFWC$7F7Yjz|XYG+O4eoEs9A@cbp~o%QdxlM@E?5uL{H<@YGKZQvUEZJyOu zQfLww$$=?x?wzOG_Fkhd=(t;Zlq{;;p z$Cp*H{Ht4L3_$p+mR+Nr@pp)tWo$4o`=NN(h$X!f=sM?=^2uWvpz5QWxXC+w{M>-z zC=>1Yk9w~KReGSH(d-*7ar3RN;X0Dr;S)6B&4irF0-oSx@xmzAbZ%Epg!g5>z9~^u z4i@%0yg^l?I=2(IV~#xDep|xR&*E6`xJr2*DLb3uy^*xCHUT~k}x5f&1r zv4=@Vp~wy5pRoI_!Y+nKRI^{Wa&gCt5S8H6tSgF6l~#~kzK13uxe`))ix_vc%QCMC znlVNv%jI{4-R>uCz8Nu(xn7&rBbIudCq%mhPn~2zp^?**t>S)=3nC!<70*h2*-0-m zs-B*=#d{7nHPo8(g<@-9;fuyuLY&_A-a9r;EFQ#bkC%xfDh>E;>|jxc2_h>!m~%vm zV_x(?oz&Y~Z$ER!=6S*R%657nL*mC{5-VKwNIofVu;Og!tN(I2)Cm4zY4 zI&*SqF=)Xx(-z=stV&_@@lMR|XEo=njtYganUQ=nUKW=&T@ZUUD*TH)CNvW_R`!9% zj`I=~-5Hd16?Kiuxe(fSoO^xdPYsyxv!IV1?{zh5YGd$X%AR_UI;s{d`v#jmW-)Uc z6z^UHI+`JNd1l2-EsS#yZc&EeeKr+}8FU!<-5tKxbPQJQHavXd_Ik%w&pvR+ThOl~ z2}F7QV0&obGPK(7XM(q0YxEg5NMk%>@P>N^9qWh`2L*~#-sCZ&-(i*J;VTfH33_*B z!Cu`XDV-T)%t>5rzax^F;n{e}M-2t%GuhWj)Z14PJaC2V2K3m*i`C3KGkH!SF_Fmb zdvh~Q=VTUh%-y9;De-nI;`(hq2Jz!Y48E#QHjO-MP|Wn0F9}Tfi8o-RY{2tIO!2jb z!Stv@ha>IH?zuiO=d+a&WKc9KJ6O&%KObt{eqdp>jBM%+-9g!4jN)r`CNf+}6cw?5 zGjKB=)d#Upy$GuGK@DzNy#=cXHwU_11@aSc6b#vMN^((XkG=spbyeM zo`=m2&h%$g-c;?^JkrH0A#a9shH6I!`uX1prhVC+)VHl7g+prg1K!@#fcx)rt7!-m%*i7V3G2?=bcE3baT)s*!)cmXt{_$=&4 z9kWGC+gLf#<-C_JAmxZVZ+b3wFqK1HaZrJBm_1z_8+yBqy%uY-`1*qQyPopf z%1kSa?Bn8RN=Uig&~WV+PEQdAIb*3ax_xoom(&zv@+CUhVR>!xovxU8Yt5X>-QXcs zL#x<_j#5GO_$jMOSuITk0=P)mC)I96HMnu~uA%(%Yuw~kE?Qk=<=F3DL=Pg4yGJ@ zvN;n$=Ju<40^|GGMqx$Ivx*SMbVr@j$7eI#9{Xs)h@}l|&AwK`Lf7t_a|k>+ zxDLgRF$%LR8|UpSCg}w?o0j8qUI{k%^{)&DdEi-t)p9WMp{z86JH^LixW+0Y!`J8R zciV%>oyfqc4#O=3c=}WFxFs0{`La6;_=aQf 
zL_^-E5g@+M``){+!nlm5UEHgYEUe-2?Axwj58YZ}(Jg@WvU*F)W@AG}Q|~~ZbKTaU zyp)}D>mdNJcTlp>QioRdHre?ctuZ+6`2;v;4Z;)JeOa33R7&O_y(XVJRvAuS8mrgm z4?EorKAbK#HThyxy)g|;FlipnW~J>Ur89M19%*2kgN}}hK91^cbd$^~G}XK~a-HC~ z(1P|m>4o|{+;ICZbBK;gyRSBqBiFZ05vbC|N3++euGY~`c0AnL7RO!A?F`LvM7M+> zI^Yv1S7>r)j!$&}^Ew85+3Vd{o?|OQuReG^RIqe;ojSUfGmoTrcQujcrJORM8#%gx z&Fx?`3ha;D^OBRgb=#7}(FqX!y4%;?)*R7+VY(yA{WqC22Ki}f@*4c@U@SW2C)KsY zQk10UrtsU~r7dazz%uotgMzQTPi}W%ad$K`$6(r`1;|Y11rL`HwD}aE?YU`G=tXHD zVP5rnCUg_HPJ?OJ76+4VpH`wGNr!K7-%5zsWXp0sDvRFk%s$c|%?Nxr;xUgIzevS^ zx9g*Pa^cD7di6@H#p+O<4H`c#z&g!xacIb^E@3h0tvO(njHD@AE9nY55My!VlL&5Z zd&1#FZ;C$dSae}c;#Ta(H^q}iF5}n=jcT%&&D}vxW#+yvXA8_~bWziGZd~ZQX)dAR zX~=mr7VutHUa{p4psLOq*R97CG`E2G`bri&(Mqi_v)SA%2g^@*v*CyloRhO{!>oK5 zk^-?}7AJw;9-Bfuzmm zbl_1HfK}Y4P8T3D=A~i%n%!;R3F#urud;iTj}6R3uXi5K zCV7sUn7?ArC12Jd!)WSFifsbeDBa^>Ku;@;09UbHA2BE~nfPxrOR>W|`#?yuwq`4> zByk(0h;i34GKf0OX%exv%r+Cy#Z1tYyrQ=1#bmnMObzZe-Mt}}xEkq*Q7p&vFPt>T zj#p-Qkk#3nFLhI-;qX?b!@P3utS3+^>Cuc6UB)}rT<6en2L@&+zA$IYoXDSLe%v`< zQZSuUCDH3gRcMdf=~{X?_3*DJn?ts0-WPafo9k4#=F~7qFC2<##C) zkee-}>dskVtJ?G^&=W!K-r={h zm9^p`F6@Q+PEDMXW6?f~MeWdGLW09_T+xnWHVSj&c01Okq&TQEWYi@744C`dQ5pd$dG<>N#E diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/e1556049ba9794a15ee21aa283876bf63e531a4f-24 b/vendor/github.com/pierrec/lz4/fuzz/corpus/e1556049ba9794a15ee21aa283876bf63e531a4f-24 deleted file mode 100644 index fe1bb6515770027f402f5e181ccc298280353d87..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 147 ccmZQk@|Ey)P+GZn|N8>%=qn5i3=IQ~0efKBR E07f4a761SM diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/e346c715ac3187598d8c0453d9e741fae1232c99-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/e346c715ac3187598d8c0453d9e741fae1232c99-11 deleted file mode 100755 index 22d3cf6fb98b691629a5cdd0d89e80c473fc655b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 29 QcmZ?L@|9o!0w-)V05fp|MF0Q* diff --git 
a/vendor/github.com/pierrec/lz4/fuzz/corpus/e3acf6f2b5a1b97f5a82ebf7d1822077561583fe-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/e3acf6f2b5a1b97f5a82ebf7d1822077561583fe-26 deleted file mode 100644 index 27156c76824eb0de31aeebafa23adf9b6f7b3268..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 195 zcmZQk@|DO-Y&o%a|NB@5Mh0F628QDQ{~H(>B=VRT6c`zV85kNQ3=J)f3@nW-EKN-f z%z-KxKUFpMm1pS|9`N_Oh8={VDoFhCVY$}lo?j8qGb6zG4f!@$5^{k?&qL4b*)k%7Sh2pJkUgct-EfII;K289C(42C}H LkKTDK=Mn+{qihY} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/e544de8de59a005934dd4b7fd465c5bb0046482e-26 b/vendor/github.com/pierrec/lz4/fuzz/corpus/e544de8de59a005934dd4b7fd465c5bb0046482e-26 deleted file mode 100644 index 2f3eeb0661b9c126fc480583e5dcf5a749dce0ab..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_ty0{~H(>B=VRTA{ZHj85kNEK>UUpurO3k7AT+u LQVm2<8B_%TwF?|{ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/e68b04a675d8d4192565a808955764c77ae510e6-16 b/vendor/github.com/pierrec/lz4/fuzz/corpus/e68b04a675d8d4192565a808955764c77ae510e6-16 deleted file mode 100755 index 08838f05e71fa5cd6b9276bc5c69bff991d49ebc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 122 XcmZ>Y%CHbGa5LiqoG<`+N(04&HEVgLXD diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ea212596f8a7aec4eb2e85fd2cdb5c2816b58495-5 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ea212596f8a7aec4eb2e85fd2cdb5c2816b58495-5 deleted file mode 100755 index 8b21a561c2247bfe09d2ea4425bef60c1ca050cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12 QcmZ?L@|9o!0w*9101E^GqW}N^ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ea83e3b78398628e8a85e2e618fa956c0ffbd733-35 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ea83e3b78398628e8a85e2e618fa956c0ffbd733-35 deleted file mode 100644 index 
0918d6f8aad212ad88a47309ec8d4bc613075550..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42 rcmZQk@|CDdY&o%a|NB@5#_hrk3=Itb8yFaPfs7i7Jg7`k@y}fVKoStu diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ea9af92f89e6889b523461ae7b2b9fecee5a7280-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ea9af92f89e6889b523461ae7b2b9fecee5a7280-18 deleted file mode 100755 index f5635a23833b1bdd9a50d03bd0a659cc01a03616..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 130 XcmZ>Y%CHbGa%=qn5i3=K%wNecj+>j(t^ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ecbd6bdea50b52d263b4e9cdb96c7ce078d2b780-25 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ecbd6bdea50b52d263b4e9cdb96c7ce078d2b780-25 deleted file mode 100644 index 41a813aa9bd54cdecf6b746f4456ea9f98934106..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 147 bcmZQk@|DO-Y&o%a|NB@5Mg|~g7-$RtDF+qs diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ecdd1df7d975c8cf8d015b2f1d0d7c6e00eb578b-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ecdd1df7d975c8cf8d015b2f1d0d7c6e00eb578b-15 deleted file mode 100644 index d02e56eff147b92a29f530274518474690eb5683..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33 mcmZQk@|DO-Y&o%a|NB@5Mh0F628R0oAdt?^$Z$?#<$nOwBMtNb diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/eda1ee9cf85f3f71ec8a4eec7534ed2677b47775-15 b/vendor/github.com/pierrec/lz4/fuzz/corpus/eda1ee9cf85f3f71ec8a4eec7534ed2677b47775-15 deleted file mode 100644 index f0d2c8cea4be83518d0e0effe712dda49cecf440..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32 mcmZQk@|Ey)U|?|4^5tPrXJlhwU&Y}g{f3}mHY%CHbGaY$}lu^j8qGbRA4aVVK{Oq^<3Y$}ll>j8qGbjGmAd!oVOV-{8Qmq@d3d!eIaYj(`HgROgV70J~obmjD0& diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f3f49f3016c41052be090544cf110c322bc7ef63-24 
b/vendor/github.com/pierrec/lz4/fuzz/corpus/f3f49f3016c41052be090544cf110c322bc7ef63-24 deleted file mode 100644 index 283f07800a19702d066bf9aa146dc6c5082fabb2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82 zcmZQk@|DO-Y&o%a|NB@5Mh0F628RD2z{nuXz|g<|5@@JFRv`-%Pyz{qW&ZyMs%fYJ JQ(!_O4*+%@8{z-} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f4003ca01b90a4ee1be5701a5dd7d5f04e00c8f8-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f4003ca01b90a4ee1be5701a5dd7d5f04e00c8f8-28 deleted file mode 100644 index 7dbc6dfb683ddd7fee8f38f9ac9d1a7430091172..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 147 ccmZQk@|DO;Y&o%azuNm)21W)3hK7O0069e#5dZ)H diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f493376c3eda80cbe822ac456486734b72f891fc-2 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f493376c3eda80cbe822ac456486734b72f891fc-2 deleted file mode 100755 index b06a853b78e23f1cb190543567fa465eee20fb90..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44 zcmZ>Y$}l!`j8qGbjGmAd!oVOV|KEXKNkN|_gu(v(9f6d5@w&tdZEPvr%lwT1AN38B diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f55efbb04cd32f7828e951d067319db00627153f-28 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f55efbb04cd32f7828e951d067319db00627153f-28 deleted file mode 100644 index 4b730fdd58a4cf87b4284c6a55189fbb41b19132..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 zcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%<%s|gS;*yh|kc#0F(prfeiUX00x!| Ap8x;= diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f5ecb47dfd92bb0564588beefd03ffcb0bbdae54-29 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f5ecb47dfd92bb0564588beefd03ffcb0bbdae54-29 deleted file mode 100644 index ec0539ee81d2a6e4796f68f6e3b82f66741daa6c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88 zcmZQk@|DO-Y&o%a|NB@5Mh0F61_tvyhCBuai99BT|Bej8Kmnj&3PVH9|NjjP3}8Yc 
RPnLn9K?x)QL{J%21pq6a8Y=(* diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f71b4776ecbbe47746fb53d7749751c5c5bbff05-22 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f71b4776ecbbe47746fb53d7749751c5c5bbff05-22 deleted file mode 100644 index dd1e8e9c061c7a43cfcdc0b43973d53b709c924b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 61 ccmZQk@|Ey#P+GZn|N8=^=qn6Bz(TYT09r;8ssI20 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f724d4c839c012c7772618e28ef68d478cc00c74-21 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f724d4c839c012c7772618e28ef68d478cc00c74-21 deleted file mode 100644 index 0476ee760651c63160e294c45397e7bd08f782b7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37 pcmZQk@|Ey)U|?|4^5tPrXB1*!V2D4RY}6vb%Y$}lu^j8qGbjGmAd!oVOV-{8Qmq@d3d!eIaYj(`G#^S=ZDu<;5Q diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/f9bcd3660c355799a865fedd15cb27a18591f244-33 b/vendor/github.com/pierrec/lz4/fuzz/corpus/f9bcd3660c355799a865fedd15cb27a18591f244-33 deleted file mode 100644 index 5af2a422ba3a62ceaaaf5a18f82a8488c132e20c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33 hcmZQk@|CDdY&o%a|NB@5#_hZe3=B0Ad2j&+0RYUe3(Noj diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/fac6c4165067ef2d87a23a2530a59eb560d470e0-23 b/vendor/github.com/pierrec/lz4/fuzz/corpus/fac6c4165067ef2d87a23a2530a59eb560d470e0-23 deleted file mode 100644 index 72b39c999b57d5240281988a4d8ac16244fd3e84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 xcmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%Y$}lo?j8qGb6zG4f!@$5^{hgsffQh4#fx!U?85%f*7z7xAJOKd)g#!uY%CInWj8qGb>{?*|jDdlbAw`6Nfnf)O0)rCQLzUoGPL5Ta{{Y563P}I} diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/fd4f0dc77a022a8140ffe5b2e1a5ff577e844878-27 b/vendor/github.com/pierrec/lz4/fuzz/corpus/fd4f0dc77a022a8140ffe5b2e1a5ff577e844878-27 deleted file mode 100644 index 
fec864e0afc1805e6b39ec2914ccd6ca5b7e577f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51 ycmZQk@|Ey)U|?|4^5tPrXH;NdV2D4RY}6vb%B=VRT7+56$trQ6u diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/fe002e4c7731ecb4c09c09a4e1fa29c0c61874bc-7 b/vendor/github.com/pierrec/lz4/fuzz/corpus/fe002e4c7731ecb4c09c09a4e1fa29c0c61874bc-7 deleted file mode 100644 index 9bf6f6e96fb9501712d70443440e3c3bd4863e1e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 15 WcmZQk@|DO-Y&o%a|NB@5Mg{;XUIlOf diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/fe78d4faf4ce717d84938010f92ca5e844f9980b-13 b/vendor/github.com/pierrec/lz4/fuzz/corpus/fe78d4faf4ce717d84938010f92ca5e844f9980b-13 deleted file mode 100644 index 88b3ef7e5ea0c0d54f243afe1d291fcec0bfdb6d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 dcmZQk@|Ey#P+GZn|N8=^=qn5i3=JUc1OQ*>2TcF~ diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ff3b7ea844eb197dc6bd59d9f8e4a4a5718a6771-18 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ff3b7ea844eb197dc6bd59d9f8e4a4a5718a6771-18 deleted file mode 100644 index eea11f1681947a285ae2d927b29c0d62cdd53451..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36 pcmZQk@|Ey)U|?|4^5tPrXB1#yV2D4RY}6vb%Ad2 diff --git a/vendor/github.com/pierrec/lz4/fuzz/corpus/ff47856b8fa7323572c8b4a6d8028dcb2663a37a-11 b/vendor/github.com/pierrec/lz4/fuzz/corpus/ff47856b8fa7323572c8b4a6d8028dcb2663a37a-11 deleted file mode 100755 index ed4aac7166ac3db93a890972d9bed4272f79f7ff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 84 acmZ>Y%CHbGabe zi4v*+=Y8))kxHV9=+Z<|QZ^c)DhUb+5&$GbN~DoerCS3fAd0GE-#GN7K%|1b-~gS% zs6|yJEx;DyK~Rb+TYw9LswF5?O~4j)AxR=sHvlf`s8FR4O5h#2N}^PXD!2z{6-X3R zh};7>P$*RtPT&iGIuxN)tAGQM_jph$B#0Nh0e#1%Pys|ofF9jQD5)eC;0roZ5|K&{ z0CqT{fHu`ixB<*`0!pf+n}8KjK*clw2%1F15KTQzJfWeDA)%p&MLZ?|RVsQ^9+8O9 
z0AU6ufB^u6%i3dsL)hs*wIGO1~#a%7%^;C(Xp&;MzN^d z1yQXQHZ`%0M#j~Rtr|2zYKu{9RBWb$7>!99Dlu51pw={NXrj@LqN8Z7CZf<{6IwNm zM$}YoqT3kS#?fp=8yg#JTG-l(ii*b3jRw(LDx+fsqf!-O+AK!Vud4a$YLFq{D6cGi zFAewmK3saxr|py6CkgwT$KQsFsTQ=KNXGR7=K9AY`|_8xx^O&6^02mwLxrSi z*}-q9zDv5Ydg^m*W^5$8r(w*YSVT0k{_?Aw8rNvv7=$w*b}iWoh>zR8$eqKF$U157 z%Ajw#*CZUx{_N$4UfJbFk($>|bmHUR-N}ratuoQtC_JnJ3sVZdd1`sfMaV zyM=Sy3gW|y`Pa@EWp6&0u%Pb3W$wU{ftE_ZQ^Q}B3ALxbu_X8e(S0fIV^a^RbxbZ% zIJSGwjkQUZ!nyH$g}(E8nJh9x-x#mUY3nNof|DDlqc4wa=1v|+1V0k*@{7$m)4qpw zpb=@obwLM8-NuZZNJ|rBw+UGK#UNLdPQHlW(TTh4`ZZlF(V3m$ya@5w2mrut`;zBP& zN;%ug?ttFYa_;QMUsbkJr??Ie{#NGfu9qHWtkn-JcLX5CP?B2l9KdTT7p_#r z6K_l^CCw+;DYKAs=!4?MW(zaz)f+)(e(|R9Phdwcnq7{^U(lc?*=S6r*gP=o)oxTw zjeRn3+^-A0gX81|mMX;QC$Z0hNtwPFZ>#CQVbhK$JzLz1Da*XiijhRX{=ch`?Adq{vk-e$r9A@ksoQg#SmyPiJCy&1KZ_&=<7A5Rz?dn&frZ!tZP&2N z#hySm1ZiD}MTo7jecOddQczPbOE(l!8Xm#Kg?OQ=g#=gcOOquxmxCNsFJgY9UmiMU zQZ?>J(y*h$zB^|7$QC6OIu4}b)Y)TlS447*ttq&Bv@7D|;|lv@p6a7)n~{VTSXsUc ze<}g}b3B!Wsorq5V{Sc^tj_shEnJTUqz^p?o^K#Mm`Z3lHAh&QV%vlzSV}?sOs0qe zJLGOhf^s$yRW>MojbiyY&98ng91h$V_VD0ZSLu|yKyhusT$2owaIjp;?noi1xR$RkO`cyQS z+hsIFva1A=s~k?6eFQLoz#T31WJT9x|#dP^aA^y zsmg#^;lb8VD1jE$x<#uOUsG^*Y<0`J7_+LJww(HCCLv{N3~tQIp9YmrFJgmk`14bW z`2dQX=vEcz@kQ&z^VSbg^gYH)?KOCcr|?X85iq84#Mdyb?n7q{4!5I7XDW#2kzM&9 zg&mMb5th?{wQ3T^8PjRe8aGrzn%rcF?5uG5-*~DQ?y-pt6vRQL+@TAX@!xa`^wgF4 zb6_QaRkR@_9@71og~R2R(UO^h5(XBk1p!w$petDJRC)DJDswVD%>@xFSqB1y zbnr0c%#BvfNvOaO@$B?|oCC;B1*Q?D`^s^~^YLue4VutJn5w<|_ z4E5nQt4Ecgu-Hm=xef0YdFE++vkZc8h6kw2M)hE2JVOGK2_nC}8xSj%Z5-#-p|Bhl zbmejRXd6GxzdE8R%rDS*2?LA74AdJjDdMx3Kci9fW8EHF?G{BpBR7bkVljT2PV zY#~C+6>nATe^?4T3hgkHDBRg+SX`a%6iz9tVaSjOr)z25?VH?y^sj)S$mmggxM1pi z{oe=F`SDa=Vsn8x=GGzAQ8*d&_>hb#F^#3o_}zVISVpP)*wlK^_rgvcNX-@;MsH_Q zK=}L;NhFdDX8M~c_v{nmig|Eb{1d7?{b2i9Zl-EQs>~e1k3@0cbkWlj z4L@Q9nWa5yD{etKYY%0N0QQjhXExTwscsD~YFIJTg2lYa%F6Ut-)w0W7tb#K_ENEQ zsz&p-ugacV1mS0YVH_|Dnw>9WXM0`fcZn}L*DDl4lb^6J8_H-pwy%xAc{NiW@+R|Y zFxsl>f?LR9YGMSoe1WE+*^iFw0y6) z!B1<=lax?%v#XD5ZowS@t!ulb#-(2=G7WUr>E4{ChWWlFVgZ9 
zq6?C+1+F0uOPA;N!l5)PK*OU+`(?34c`9>~d~<4Rhoof3;}L#EQRUv!l|NWg_}Kj9 z)ObZS5?2R0n+|k>abeuf`_Fo3>B3T92T00ze3B~bcrh?7*MT^`6e?W09SLqjTg7pr zXXA&Wm4lZP2`-QxG8rWAMp9Xf*F2$lcOISfS?^;(+e`_#N5QkTb?-g!9m!;1pKm@{ zMfOcb;Smofwc`*zzi=UVG$)b$;-6Je`aViy5tK)|mq_5dodmvnZ*^M%g}a>NG8VU~ zQV-ka+x|b>_GrE5v7g*2ftqda)LATh@bXLbv9#Z$I*{^Zsl{|-P~HMp9BB-J5Do9Q zk5?NT8-pt2l?Gzm*7mMR=RoGU^_BIssw7IFuM5x7K(vCFP_B1Cz??UCv{Jak%$YQEGwkXopR0{ESWzQ4DX}QK~3{B7wa}lN6Ajo@3{VSaX%aP-zfYl74 zJ~r~vcn6is*~vYGW~bP7)k|R@_1h9>5Ro=nJZ7j|>V&YzfW&6At0EaSb?s3=(5OQS z16ebWWlu>R76HsYs%(`o9_$Vlzk1z{Dmuk^gB=|m7Eu^u6M-URVO2noJVi}=EO4WQ zVvn(%(die$?LQB=vsO=O$;Qdl5Kh-UB(e_)iP>Gp&F&o6^sy7p$?vv3Lp>+ycP*^D zeW#_CIyD39Z`<)_1M=z)i$T06=r%+_;qZ+d_#~EMiudaK{jn_ z4Mgc=W9<94Wo2qMG*B5sJ8+KNGsk1y7LHUKMAYgRL9fqxxgz?(6GXS(4!P^NW|$YL zid6AJttJ*ovw_6*lKV*m8@6Pgp));G8n` zv4(T*m5XzvwCN<#3}KQGY@~WzIYZQG!IeITP^BlBVv|?N&q~?fLt*-4^p8S~uPc`Y zeL)?b^FhbB>9rq_tmektFmw)D*D(VKn$MTmIzhb?G6P!sd#r17j$K-(a=dd18B>dO zd$;MG#aotnR1}q5(wawMCFjZCbt{ME@bMOK`h^y$=8r;5@=ur?NcA&n4-RZC$s3$~ z6N8Rzg|jM^?;G(?Zt3QoDc_odd3UF#qd@kJ*JO1kR)q{3JqfaEtUb?0Ub)f_UP9Nd zfTOcuQ zcu-(9G|a8WPeT};n1SroL%4`=rZPF_J-LHG_-NHE2jl8NiD=GnTGL3 zm71Cft`^88mkQ&{-Nct_nl*eue&AW;RulCtmiTW@K#-)D&oyQdg~HuQ$WY?4jSYO72i_+i zddNlIXL0?#bfxNzMrqAndPEhQb4;_#!ett;gHxG5Z0-H;qa@gRIyJj|_}?o)$bnuL zLIMrztEP}@71zWkdN0;sVpo8S_pt;HJidE#8W#bqVphqIT3{*8w#`%=@x4&Dcl7Arzm>#{6Cs?9qPFUGT+HXAg zd?8`g3ybixBc6herQ_O}h7&|{TdLZ3=Jjq831Webj^agnA%PREa71pLTISyRi_X~! 
zc8tE+kR+AouCzCaqjrJEk0@jbGXk4qIjhY2>lE=VV=q$E|H=GkK;4XM0shU!AOqrYl zZ)b$lY4RC;Zpx_#g!Pe;GMi1sUV@eNO9oe#eB$Qv(xtFw+K{K)Ah+r!dE-#8Rj9J$ z769ghz{!UM9pf zSqAB9d4029FjJf}P7G$#yz<$xVXzcnQ(%Gj5r|M_z0Ih4Z+WAPRR*99ad5rSOfhnY zA`T12k#l3oYZrUV=%y`%O>i!^TkUtRJj-2QU|7Plp)HV{u=4rTt^By~Rkf3RupHi- ziFeX9caA>Y7}kX-y4reVp_vUt9AHL4p#{p~!VA6Y_iXZkaWhTey)|p+t7JwVCljWn zg{ymr4hO7<8cT9q210645g93Lpnmj>E)nFeTG^2%0y_sn^js6vu3{nGm%p+Qo0^kTDv z73J#I40%521fC4t+mzUWAukw;E+YL%9e*;=nZ>uhH$QV~ym?C8G0gdvUBe`iRbDWd zT_Xgt0$h6PEWEKIpjqUum3qxWiPWbTPSku0?ay-~6w2C92Z+-M*m9+r_WRT9^IT$2 zo+2f~g0hHq_WNp)m=_(dKty_*h9-pNL3fMR%7 zXdg^e$BvS1(bjpkMAU?ONXJ* zI-uK^N4K@dlyI%VriC(^2=Gaz&xmc2HP^devvx0dULV$gz^`B{*}!CCmiN0y@p^mH z>!Ad6p$9wO=`qObGc=7b(3l1=_Hc76$`Dk`PRF995WQoZPnvs-ju*C^yF&A>{fiqR z*De~fG6>QqI?jqyUY#M>)D2D%zC22ILJuMd(1vr zEopDDo>ZrhjxDsm8+}{kn)k2K+`=tkDL(QPt-I?*NP+v_;3Ng&d4Etb zP7<<)&B8t+sgQEBGsMCAY6g5v2aqso(;Mvv;KUqjOS7F6W1iOGff^TytQq!qOt_)< zRm5CP1&}vdlR_^$g0G4PZI`s1k#^%jJgmJpT-dMZuvEzmTzg*@qc=c-2_5wGri=q% z6T_Kh8q&gBzTK?r?g3grqPaL`cDfEkqD(4MlBSx>Z;HtV2*NIWW({Ikhvb&WdxB%J z*u$1|XXSp@&IOq}V`&c_VL)ViX3IO;Y$lU)n^jOYkxaZjV`V$+LzIdx7B_GfrnN35 zO#B@Dka2@$>rLmr2iP8l#zO%eFFvHC7Ap(WjNC7_kX7ZY@QHmd9&XOUoEh<}bMb}w zP!T5bASg%-0g1c7jZ*@u9Tvi4CNrVp{Z}XpNsTh- zOn%O6;_!J20%ASVHeTkW*mjuP;irC^G1g7t%TjV6G%+IFw&6NK!jyXzbPFTl^@>Qi zuC4E0ji3>qr^KV92a{Ynytpvhtt=%GOn0j~_lHAdO^Hg+i!xD1iX4F4z6dj+t5=S? 
zKq_+z1WjtyUXg%_^ctOzFTF?65k|Jjy?XXxk2jX2ywj5}=tru3C)H78%@Z}r#|YFm z9(h{7<@i4uCL9_3J12X2tq6U#C5G_q{?KHj$-AQ8Y!8j5(NYyJO5pBG@=sz^44njH z)1_oWPS!7PidNJeE_0|C?i(6$*De|EHkOkCO3B=t@o-%lz)8yrUPCK27FK2?{zO$) zIW*pZwpR2~Rml%82?oThJYyPDtn_5&DRQhf7i)&;Xm>0fR$x8I1qmV?N>kOn_iox{ z+CPRt?!~dI!v;&V(vCN$JM`KMJr#nl=BCDYk&1(vCuw3mpz9a3S(>aW{B?#M0 z1@+M0+G6hDDwP|u^-Gy(cv+=2Cpf`#TFJN3Z3}IwtUQC=4C7-@m@sF;BY_&43?2>` zmx7t%CkF%_W8OJG2KCkv zSOR&lvZiuJvBo6zH6THcU$>U+{K@E)Q_pjt?Ndwc-;48EQ=xPADy|2Mjr!Tur@Rb7DZ-?ROOGj`6==* zeI}>F0x(WKjiLvAHwj~5yz1J|dUVE}IaThI@0hW{Kxaom1K9P286W{D2BETRF9Ctk zJFlB&r8B-+_SE;Q$1&(;nJeX02fJxa9FwKNTRJ(PC6vFKw3)tAtdiGQbsrfHoTp)1 z-$#L)C~;l&UbWPH2M+lC9(j?CdF~PGwYzdQHcp(Nw0<0LdzMoxSM?4+)dB?j8#ij;QFhw(C{)c8ib(oT;HpD%K@6KdOa1^woi{quQHAl zuxg8_VJ!GB?W(ZCA8BJ{f}->30FR{@ZQ<5@9H`_oDa*~hg;?iuA_NwjNeH`Lj2ggZ zi%wO#zKNOgXyF0-*r43v;9+x+MYobJAz;4AKQFhg8+34|ah(Q)qa40-(arZD41BsS zWT-5G1A+>1^lK}vJw3&`DQtX;=s{bC2RLC-#4T5o(o4e$L^IOx(L2|@@UEiRTFg-s z)tJ`e>Gm~Lqdw;qpOM1q+HruGv8N$vmh6%RF?uzzHVP|;gNu#nAd!bBEW}>XJ;q*{ zrX+p=O1l_N&p|J$6$%)sJuIrtfXeG>>fR;r8pyVVK*^hW$FL|yU@&n7m=jTjK+GxD zQPnz~=KQ73BTCLD$d7nLBb?>KqTU#+RUiKMTGppS4fzPmA_5!GaG`TRHD065i z+F6S+)b=Y>EVs_;aeHX3fblq(OACT0)ab=dCNF3*{xnHc z!@MUs0kLU=SMAYO+;b{KZRUsaCA4JrbZhoFZ9_-moku7+w-U7(WT#E>2rE$QC9lisgX$806OqY&voo zxm6(e@4#b9_;rBic~(S%u?Mi$u=kFOhi{nX~VRP z(KC(;F)DwdUVilWx!U)((lZsA=qKC^mw!s*m$)X=+ax7spgTK#+2&lT8}vFm&duu3 zLh!cTRI2U0A8!XmIj^77p{F<8!S3tFT)VxSw5t-@V>F1uP4P0xbvmS)r!T%^QB;1t zSY{UzqRb`Jor-)^eIQs4+wjwpu+cKPid8$AZc-Y7+e%aA+%p-JnN{OOIL`Z!pRiCv zj?~X`x20%ra-Y!f4j&naiP2@3%P!Z6mJLn1L*l6mR&QeCjsr4X`hb=l+AhFR)UQ3m zFtEClSc~t9V@je0KKE!>yR;s8{iUnPh6mIJ9fK}Q<1*R2f!A`jaBZbrlJu258=>!S zDji`r7HQu;Mh2dSlBf!z0bwnYScLJxWvGl#g*Y$R_+U6wW*G8fK*e61@ul`e+eju} z2R%**F0olba_U49f|KnJT_*2XXbu~!p_sM><%dCUpGp%c(r--$SK7!~$b$gB0k~k` zapeWve{NQF+3ua5I+I-$z@9n==qbp0IF{)HIUHpZXbE~V6eR&1`8oB@*Uc^N(Y)RV zyp)lZ_H}fZpkTJn-latp@i~G-u8n4V`|o>^2fiT>nrYyQ_aev+hhXFfL0((CIbmYE zDCLOTjYKd#%y5Hn=y+AM^_3eGD(q`Hc#1BI>h3^OUte-B39}XBn0elATb6V%-xB$N 
zhHJ<^)j3|;NKPMFZZ}-1HO^Hmt0+6-@wX{IB)PzcH(-QBsc!W;=eP#^e7^d{o2xaI zh0XDxj+f5-lbEnV6XIW91qidzR3_lip@srHWs>E*sAo-l*SJ@^b?L$h-PgR7EeJV` zW*;~xMN^QpL=06Pa)q7`FBa|K(8)^n3)7-xK88+5RjV5kLdR_arF2aC+ia|gP${J{ z*EN9f)NW$a?VYbP=U|8zI;nC2Mf(hhFEd8b z9j$GCXrWABUc3{!24P1^)C*`yq7djLW|W&lRLSnN)b~@MdU$WgmhuYmJ~ijDy{9SW zA=iS5MGrqd8F@>Brd>XB6)Wo{NXQ`q{TM_--q59+WXja?&GA))B3mw_1M0y-$#jk6 z-d!z6uE=2FQ<*O_qDXiq!eCvUr^9!YZH?@p&C(GJIDb8&*p#cMkX}e_$qADaGiAD@ z8@Ht0p566(m5^qNDF9Zh6sG1sJnD}D`5}Bsy(CK0`L6@oV+S^r>mi$v!1?lrFbi_R zua`GkbX(2IeMdw`M}koY+aYVh;-V6@s<(7;fGJ{HfmAT3R;DRne%s5oq>#@N1F@Ss zQa(0?R)hsy@fmSe*hcmhPD{lk&zsgxzB_fBHS%^!`q6|_vv+}@ggrBtWIjB0$fDp~ z_Q<7Zt+*TOylNt+{d8qvb4X2tMVL}Sv zT7a?!&6sd|h}{Z;k;4?-E40Ock0k+D;-(_^z2TV6o>^MW5ZFQY6oAq=`l%9U#qG@% z3p#Nbi;)^ADst|7)qh2#!DW+E0b+wHA#wb^UJ!mJ5DBta%Vxv&JA07*oWe-3B92cq-oLLJ^?yecUx zriL)+sxGYJAkYA`!nY|+TVPUH#sbWQ!Y)LW|0oTpBbbq1?ro;DoG9X$-i!PCH`>tn z`E7mI*wP-pp!M$K-yyjU%SuAmM9{?&u4Evz-TGRTx!Of)r0RjG_17;*XSuc(a3(#A z>AhlgQNxO8cCR$bXJ+s2@LvI{vL;eKw0Bx+Cb;=$Du)+rtJi|q zM>S}O(yzKxnZ4X1uvk)o2e}9HQwY4E#hy>S>B@6cLQV|6DBWn@QbAbQ=3>3~KYPBK zi9mc=zSI`>Mv*v)ByXpZSsI|M)LBJI`*_}8nHOBm@5E~lG#!t-i2O5aoK@o4LwltI zQHB9e2R;Vz$2sMXcFLYFXjo|0=FQ48>)#4<(VINyG&Cdf;4wx7urX21A*%K?cTzJb zqS2yzEJB3nm$o0c0Pm}JpM9R26rC+y5J~5t(D24f7(~CZ2%VLI-w6&_p>yq_n7G$cW@*n>~Rl#uH=qEQ2RSb3oi)E ztpf+8oDLh)wH`x5N<%>%ZMVLfE{Our@|G7$DL%6Ijb*iYp~qc*k*zJotC+L6X(-Qa z&fml&WY>&1w`bmSIihfnLH^O3T=^)4lkZF{`Y%${3qDHK@l){^Es^IbzL*mEu@F5f z@C8YXe)ibRm@T=#-L83_v>-|nOM7{V^nlDZzWhV7!iqgdFuB&6h<-lA-3F{56ygb5z{;ag-h-pLJM8KK`>UE1T3rFCCVp{ z3pK{;-s0CCfn+l>bL_*~R-{xdK62Pjqd>+EYQHKX(rJMEA$(M1EqW_UfFMr;>`W+! 
z^y-VlfZ)(gaf|exy2lmG*g( z3s06P-umPU2FbMsWHpt7yK*6i+E848ljI(Z2ETi# ztw#9S7_)B1N`u0DNR(he>xNiBKwar5R3P)MYx3P(?78bzy-pLQ80;a>pyBn}+EM$$ zmUMy!9-~*kUgyx}UbBAy9$$`Q*P zRhUyoIbiBDL>9WmkQ#EeDwxY##E#0;S)Nx)4wYp1wqb(%F5QZ7?3zNQ)THv$3>>91 z&&p3ib9@Vkv6w5mxatq2d=tZKQR}!{K9H{=YLhc%@|JsWR76799TL)KY~{S<9pfE{ zb)#i5Vw1jK1mJ~K+a_(*8{ZA6r*JjkoX$pKlEA$1{Z2P%Q}O_lLFLuPxx8C7$18}9 zLCSTJ_v`Zu^hD=>su*Hzun|vIZ0yb(-yGqn2gq~=Y{a)s*}s!)?*;L zTe0pCvZptRVOTxczetS2RYejR1veJ>wCB{x_o4vo5oUeXMRrx`Jcv;SjvkEWr5n|5 zidN;hrQrtBMp51%e%n>4NTHag0sR|8wrSba4%uNB?G&vwrT^*Sqf13T*dS&sGVvYvQ#5F zSH^9cJ}V5d3PtD_>CUE$RalBG|<-F#-zjMVVo>EVe6*=Z0g@ z#7@4^4!$|UcJn!DT(_$Mxss=s%d^G9i709W@oG&Kst!_AaX+frB{ph{3Q zkTAF~C0x%mwJgHHigo;%vUzy67j@;jb31JHSei|aAnQ{e%Rhc@aY`rR+O9-mp zAzBuAc4JO#VeaW&6v^fv3kq97mBU=gtnDyjOT@~livBfw)j8dv_Xh`STMj$H4X1Au zUTK-MBFGmAAqIVH*h|bHbi)KM$hHGRrk&iwmxa0;)ycz zZWUN2r7DCy2_`d-oRSGe*4pf6BAW)mX+U;4Md>G67A)xemeq5dC={1`mr>wVJ{vm( zx3mJensQSIbIXeGX(Dm)B*}7BiTe_X(Q;yxLPwUIRkk%)Jk}Ft&nOozfGwu<-cL~i zdLn2}vVFHn6>bu@**X=3Bezxk2rc*7@{uRZD3XjL3tOrZ7;YkI_bf@^3x0~A)<{?z zoUoCTTR*T4y!Il;WXLSPN!4G_seV)2;>1N__a!7OekeXVrp0!oEE?xlwFE66{vv~I|A+*)LT2&9??fTT?q zE85c;my4!Y;8`BQB;a(l@-m=$O=@SlHl6OYi`FI<;lf7ehodCp!Adx^;IP`H6Pd1< ztrM04Bf{=ggV?FYa*_iJCh8) zdf2A$6_7fttJB-`g%=Kb3TVBB3`yY`G`N)q^`tpgXsd=(UnGF`j~tl~TT;8PBXD8F zb#*O*9<&@(n(zRv!@5UD9VTJ@~k~(P^Qwyv3} zyysZoz&{3kdU3#NXE+03=IQm2hv(wg6L-=1u;~IgJte<+$)@ZUTYI&PUTRV=P)pz8 zI{K}&6SCpTpJGj5k4LD;$`L$VqV-a8L~^w2;=6#$Oko8w_#KKlv|XLlkJ(M`VedaJFx(_Y>)NUkHD^$B zSDLNUK=>C8f=%C!5mb;UPgaYs9Z9+4fJa~mlDa`~M!l?5{b16x34~j?jKd&DBx$R3aKqKl4 zdJ3!>9-U2sxx%*cf^v!sw-pFe>J&fPa&WZ&J9=Wu<^g8>r}3}IE9B+W~_ zX|~cR1SYqa26PpO6`xzLZfo!h>N&0FfPymL=P261`BK>$&976&tis3DkU`!xp=iCBVB&r7A+Qe z7JpX-*CB>!wTcg@g=0W{Af}~_LF9J^8Eg%yS3*THeDp5-&ZI6n)vVbao+N(l(40E* zO4BrggBtqWIjz;|zMl;@Xn4UhGB$9g&V@3`B66>p+-08YxQO7%&6po{nIJnZGPJDE zQv<;U{ZjO7{0Tm_r?P-743!L*ykLaS0@Ci4O0Hjw87Y{wGIA9}u*G6^hr>%=_G4k& zVMW(Z6DpjFQ~`TU7L<>b2;nJG*9tDQTOEnsDCqpVbGt5%Rp&`BRw7()rZIXJ-IUz;?c*-}kQ@g}Vxx94OE(sLju=)xykkd`ijH)wPn?jBnyQ_2vG* 
z%m+c?L)$wKYriT$5Yz;E32&#p0BQ(`B0vz}LxB$jN5QJSp#VvYXLak=0UUo#)Phf- zt(?yHk^mrIP50XWR=M@ZIKKgs?+k!-)DVxo5e zy#0oSo|SdF)74WD#~f%SC?_c=(^fbI#ipY!Td51&^Br;ggM5#Yni zS6a?$f|8@qgrP4`MGvIPGusN5G+S(T1~iXPgWjOOUM@=Wex?=8aAAL&%~~ zCwiP^T>DCdN$Zh~v96t~vkrH*wHx@cy!#D|v)=(AnVG$n!`jj7xy5z%{yd;bU?wh59Ldj2jA!6$Hj^Ylc4?(2}!52NuXi zg%vOtbB4~XG!u808q8##Z+tke_J(vXCkJ^BsBrR^swq~c`5uNE<;Z|lSZMZD z!XSIGVb=N;O8l~E>N}N(mZMKa-eWPq5H0(CwpG_)q^k)IRRx82VWw|O*@8|4aCPK6 z?C!=8Qm-0puD$oGbs{2=d?}p zbny)|%%J$Py*kMc6nG`lUTU5O)b(bDF{|X=BzfevQ?@NC!0D0XR$Me0;sp&)A1I>P zi#RP2$sOwsacagQHVwtzOSl)%v{$Y#!E-txYN$?#WKR!hpmt((%Q>!-dO~M)vxlrN zxXlv4_mAAR>X{2Kz(hhEfW-mO=b27T<721wCOw^lT62_vqSD=j_+s}dsF*;2OKcR6 zb6Pq`aZ+r;y_i6h1cG-p2ZSXSBom=5^Hj;xUB0fYPii8Ec2to`DeIwwdCp7T&PYAA zHQIp5U8EI8S1>|I6;g6l8A}gp*Psb#b})4xWx0#PYbORC=CjI%li|tI3J$Cm&aI>L zHa_xBL~mc86moclRj^baCe_abEDo-+T5_1uy{;96**qa_Dnt{gwbZ&>g{C&uQLlcI zoopmn%x1rI{?#6nLt99uHyx^FV}(L==KRB@}nI+**wu z^{bYWY+0K@Qf;F~3~LpmCX7k6Xe7mp291hByF697Zi7J@ewQ6)uajYVM5v=w5FsM@0?ii>P*v21FJsL`U^7~5(Ua|BMh+j0?IX> z_Lv+-sYWkK{zmYoD!)QWOllJo5YmUiBG$mLRl!~1`yAE%6WU6w(3I<|;fp01=8epQ zaT|e7QaqQcPOzol3R#@pjbx247&7q6NQ`tCr#6VA%4D~iG#TOTOVCJC zJ$1*aUEW$t+KJ7}Gm&^qA%lJeozEPSXOcD68CW1j)r@)_B{yykIqg)$$1hvj%Z%BK z+lMzdtry;b$h&~=VDHo5LDG7P9L2Ull13Eked3ln$fNdHKHd`II@PKdI~krAlJcX* zlOl?|5fp=6lInuAF=|&Pz?ahQqcyEeu->yDA7NlbhOr1FRH5@c*0wTao4sjye4_+c zb2QE##nLsBSBGD3OIs8a{hgrmf3Z z@wH&Wos=NV8KQ<=2dkK)!!6N!mcw?NZ5+3Em`{}Rm94D#rc!x8!;c@PXhnBVb|4|Z z#{0*Yai=&d5VD7(A|BTCRQtBw@n_h+7M89xN1tA#cSt<7;x##1t#pP=dXd}-^wKs? 
zb@!DQwt(`AH!7&SQJVuOs>P@&TNICTnI}xUII!f}nfv(S;gE@ZjWx=Rr2OwGJGmZ7 z7KNKR<+Q5-C-`Is%h%(yLWXZR-LrD6)@jp|jxCeMI8Qzmpe}?y#JNPnV-7EZdHqyU zssqjIS>E>tk)+%)+{~hMVdh2JFmWi4**;~fT2BJrDP_f573irD`=+HtxqWv8X4=To zDhuKCd9POGlPj|BwoXM+xhdbctDG|;R0SSjJ6pr*z+?)(>P};UJwHt6uu6R(f$6>} zoLtHrHPbMF5h{IH-QA^oUkQ%KHXAn7fjE0K58aEAyq?mAwB{ETv~^_pUl(F*r2Ri6^f#ZXvJza(Wta)DK^EivT7}& z#kMLkXp3TvY+_14l!yedB5>(fp3T#v-EHbn>T>2m^b+sce-|lp{zuIeFIImP1Ox$( z9R;lodLoHc5KBhEVGjG8Ps=jpkU^t~vFr$t9TUE(=?pPh?E@lCXAqpTilqa0m+bEPI8*z!y+N% zgv^aa`ZzJ#Av+iw9<B0;hNx|NKdboK}!RIHE3npf41ZPilqzN?V zu-VTAQ85jasgC2ei*#Gfwj_>pn1RctBU9Gq9Uyv&i8sZyBSXNDp5 z4`*$}=iI}zl2t768%ImHXHrKz*;a=B!lv6uFqqG4+oFO)a8|`WS#uqwk9BWbaoszI zFwjwkvThx(G?$vVnq2J{!;X-$4i^h;1Om@1KdPqPm2<|INAp}?PdU$l#xtp$aB0xz zida!~#mM$FMM{MoQF>iP5-{7*LgwrSPAH+ie@-G2k^yM@jiaYb4I7&@-+in~q}-2H zV1S1-yFknfnRL!LQYS9;uOqQGrORL|*H0>`iU!w>qK0$58wyV0eXQ$*jJepB*Q*Ayb6waRLvaJ~%Y)}wvp(^&H|?CF^Z>>~|jmRY4Py%meS z8(4`AWbYZdb$Ek#Mkc_{i)E&X9;)_7eHg0XlTgtJ-ggCBeW@rwE~bHF5_hOSbz-y+ zsFZ8VE&8cO=@b`Q$6bO*>M-^6%P#YzXjIoQ2!=Xtq3pdVnI*$Gw_5;e8X41FoX(yl zEb;k-Km)QM1b`_3QY55Dt#PzlMMl(CH5C}u6&9$~8iNLl7K=uTqQ;9#BE}uL5;LlDK(<8v|!dPjbhl_7A+N`v_{n!ts6$#HpbN&qfxO`V&4s~ z9eLKV*7%s3QZsbeYjy1B_TN{`A(+yYh7+ZF#}S4_H4J}}(^V@u`tKsns@$=hP8yL8 z3A|<*ZHO8>N7v1H9&1WF#{*%5!N(dCxn;MtfdgWUaa{-EqTA>LZJ$am&I;i^u$wS2#$S@ssw$y-iN|RB0!6u~JF>cOg7WvUzz-p0V zHx}{v=Sl?j2W&{^g?YIq38B;mqTVWl4ViS)J$;ht%7jomq+D%+!?hvpF|y-3d>_z38<$D3rN^s+2atUD#(&I^r z{1P!h(LTUKWhp$?9&94nn0xA9Nj&s&6^YMLiCG3%!VHgO+V=Sfa>A-SE~dtTqI_b& zYh~hX)dC?g#@fabX7Z{IIM!YBXDt$G$n;7n17_nYwoZS~W=rUEOQ>9-RuK2dNZlMdP)bymwplD!xODKXzcIZBckLAR!8C?nCZp-6e1 zY9w-a*%`6w6ed1}iQU6eRwhT`nD@5fc_*K%dQ5q-q?|B`s`~cRP1%fcQnHcapnU%s z;KMVy9=Z~DO-Qn5KWK{K_jb6~?IA z+l@h^QKF);sMa<#Sk{Xbq}3KRk~M>3t)jM#uS-?O6~BilV64*2XrZWuq91F^a*eH5DGKUY{R-Z+DZO z*~%A_W@B8sT*nurV{uC?R8@u5c$2LAL0gxm(o)w?3|cO?(CxYIQb%NRKLN7P%chM~ z=HzK#MoZVBO}xzLc`M9wJBt`0tg;zz#g05``~@WGPLz=z7OtF927R_IfWj?UGanMK zd*z)CvXD{8Q$GX*P~|ztv$2-R4KSC4%JoAtBQq2@DI|FfJ0{&cTaGgiwq<(7c?Bd0 
z92zl4fRIo#=HE9ss7`g*bV+vluwWHUWwQhg0!0gy3JXtn)S;s!j@%uiXllJo{@LSlzWZqh}Rha`J~lnbB4T|Q8|@phXluNN%)7(Gsb z6n;n#OZriuXD7_5H&{`d2;&n~t5~}YNI@WGY61=#-Q~KQQmQV0S;+aXAdc(KS>6Hx zfBBLEP+mV8G9_C&KZS}cH4<58qQ_5 zhh7*$Cz2d{0Yz|>-c3kS#^O^5V&&e6@({RO;U|j&PTFr>(sxk0VQ zibMu^P>&iurmN5i$uVL?gP1BYkmmFnD`~c3d%|$d|bB?SoJ?D2}&mE3J}P{Jm9GbS;;h z=6vVtMKu8dsHfp8yyj<#N>!4f6tZK=7f=s)Se9*mODBAkk6eSeWXyQJRqCH$LDjh8qx0e#hkI4^kgt z=A1M<4bY1{&n%Hp>g(d2H$)Gd^LPc%UAefEAagE$Mw>Yke%hOwAfmYxXaSUUk0UY- zF#ARc=%~q8&*qrnnS`o`$8>m8V7FT2*$g+$@G4BI%iTFs4|!sU9~u`=9_1OcxR{KQ zN8Kz8H4$hMTnQ~3B@({PD@d53?z87j};E9bm&D`f0 zSTeK(PneGCMi2!~3ze_(rTcmSk?WIm7Hx4oD=P^1IJ;10?!L%OWQ;M&yq;fGvvx;# zu$p_I)t4z)u}$Zl!!wj!hnS(gyKlu;?q^`n9Ezh|@vGQo?U>q}i+QJUPJxFO+Ku|O&A1yf|4H*>c$0c*a-9K_G$iek` zhZ}h;>eH?bF9m$E2f1&Xwvdlv&Tls-Dll}z%I6DhZZ4^K=@yD+#`E$tY-Ed<6KqF==jVS5M68*j&OYS{{h;XpeIkQp@2 z@ggD0v&wm0;N|2=USqr=O$|FIzPKNzF59(T7aBl#JPj8o%|OXXQ1h=e+wLyDH3v%c zHtG3KmnM1(Z2T&x`qW(_kBRe3-7IlN{z_$jM89@vN)G_ekfYOhI0ap$y1lqiB29f| z(G<#@YY= zm0j8KPl&_Z`+b|GXgBtf^{{j&y|UOscs!nx%zY{MG^iIfrPh(X4ME|@9)a|o>DMl( zEF7%!g@gNHIEg)|o-FKm6;(kxzjX#~`-JX$gt?lX)^XO{kD0OwM5E8Q&6w|Ns*wAY z)^yZ-YtCf4l!B>qRP=6Mgkx3Dw5qDW)*XRQah2&5f+E>$HvH2LR| zw(EpJN@jiJQ85?-rBnAs`h2j8-ZN)NE=f+hWGlMT=r8#ab;IjcD6ZRyDDWvkg(ID7GvWqR~+p){RtcOsf{7 zYKpKy0FWdk00|NS#N|lSS+)i>OsP1fA#8}}Ek+i*Q4S1kzq35I%|lCzcySw%{+ zwUa()qxEBs4j6IAniFn2Ql@NL4Jo!4F(Sw}y91zEv1o>r{Hcw1D6c0x(#4SIe z#AzEt(KO1Z4Yc*QEGfI`x#%_4x#KCm=#rp}v{}UES!HynRq9mBPiD0GP5iYc z8nD~SsJU$}-Dx&L&tk?Fx;_=Mcc!j{GdQ9#+iI@#SWIKNud^9~L>-tdXrzsn^rD$G z%{RQ07_{V-6m>`2YJtb(M{5L;#B-Ui=ZkzZ31sRXGU2pR-OY=Ii>?Fq6i zZye@Z_WLWy3tA8>QYnKDxorNOlF2a!L-O_!uLx@@k%K0uAg;%(4nwXpwCLkzYQ(A- z-04Bp4NwT@uJpH!cTJ&Dfsxwv1(fYZ_{?rSdT|or-L=sH<|-qZam?{1y`7R_x#Sp+ zT@^YCaGJ8cM5zZ<=8~3gq1Hl|XT_D3WT`)@^K^-P6!P(RZ6n)RuKEgGUUPiO!a~J=s!b4OC+y@=Ih*9Zmt3S*I@G6IT(y5K)f}gG*cE z8Q>3<6AI@tN^Esm_k&}G&{{|~t7k}$v4pIV9Bt^vbk-PbD01K$mIV<9$9gcY5nED* zi!Y~c_iQ?VKC|1@M+mUIM_uH*&Pc`EXU>)hLGZtIcV(0gL%~jY-w2g*Uyy3K)uhxP zp!ZU1+|KEQ-7?72;uVTh>kzAyVb1DQ)9lw;p9Tq=5!N47XZMvYgRXsQ 
zw-RJZwXXMay(d*GRWvlvyRm!0&&;NyvPm2w00|I66-L@N!dk}I)flAK$+2r1)LSOj zF|1oriqut#*tRtr1+7N0Y*Dq6(Y6aoV_G$0v~7)&+BHd4imaBl#j#ZujTK`WqScKW z(Ws+h*tBeHTWHoaV{BSBqhlJyt<%xv=XKfL=I-g}@3|=n_+1U)gUvz-%w_x?kz$&% zuCPuPDGk8MnG9p*{s=r(v3k%*g_5o4qKKWlPV^N?4wfwH;^k@OOVZ-Xu_{@Hqq0y* z+_>Cf&0}Z~4gxvQ6tT4tc?R) zCiU+Wv;*Iz52HbRjnf3$-6b2E)8c{lm9Ms2(6$Z*a1}y@V^Dz39T0-3QY4rShc2mM zSTN|lO+N2qnOB&@VE1F>gbNnJq&0b055<=-nxo@` zxWuzd`Wl@14ibG#e1;%>C`DqFr5)S~9tAhGEjAFm({@qf*&{I1rL$z0MN46l0mz1J zqS=GGzXNw7I#Vkp)tj@zp?N1t6@z%uLWM?>amNm&9b~-050ruFi=lb-&xQ^#Df7gq z(=U5Fh5~7&8|bZqYfPR=C^!~{jTJnIb|KPt_EQ~`>s zsJqe*FxO;)!W?V_qr{bicTsx1X?y%PV-UnwQ!L zU7G>yQ_aSGOO#6Kyp?@IjJlQy4KGU$4HbuWAPSQ#d8#P>?6cb|q-hS1Rya#lW7PB2zFa4XS`&wUcrJ6)PkSXLjzg3yPs4aPQ|wtx zsU5NGe)e??V+^!V4iO0gkO>5VNRSGkqiQix2E|2+qN+7w zqQ=@Q7K;&VRU2$p#i+@uEo7pNi(^z(v1=C57Bx{+Rf;W8MWbpcHANQDv_`1gY+A)* zQE1vW)M^7!sMytuSk{t1a;u9}TNc@xHMTXV(rAi^qikzYR2l^!IMs7L7S?v?;ylJ8 zFXMONQ<=@0u#NER;uxkf>-Fy>0LlkQdhSq40FCP4C|3YfC&G{6PYOwfF+`*9UJf~d zILL6tj1LSSYqwIC}_mhq{x@xQxT#dBd%?H-elB$;gQG+E`3indu4D20Hj$x0X&Jj7r1^QoUftuPy9Q zjWrZTG?j8i@mm9`#B-fd!Sa)&5_4j_$r|;z;V-gNrs)wgyr{-U-V6h^z>}x9Rya)P zU5G_Px7V@?rJV10yy?uTI4rPjH`4HH(2642PY9utDoY+&8QiZ;;j#Q6$`{&KO(l`S z?0Q`d3+8zjx(_IHM11M6Z-LdfzN-?TZh2*Bl?%p=ZzV=ZRZeR4sc|?d?OZRnCIynb z7bR7e4#hJhYyweewI$Bpuyxg5Rtf5I@Pb&MyG7&Jh3^Zb0NLBc#G(&G$!V3>9Z3V}~*p5Rgh3ju0+R1`5Y4KHwZskK!4BDbkPq4W2R z=zF-r6AbR^ySv=2kK(O_kqOo;l5Wy-dMvzZYg-7b0LSG)=+%cp|vWC))qb+H161z!l)NfS<>%J zs*_19VHY+rN^2+xh74&%zLuV*2el5+=9+_X=1?IX;Nm^Li}UVEgWRbQqRO$0-hAvL zX{9VvhA+kLNTb|$R$OJJYe=+3@K1S7wqsl;lm!Vrd&KKz`W@o&yh8A{m?rlsbOYr8 zg&lC&Vj`)q_*&<&w&343)EfX|?w#ue%S`px6C$3Ay#h}Q7+I{3)1pIkgEi2&-aejw z-!FIY=cd;6HY*zziwQ}#uxgA^RBfWvRYkF?HE7#WsI^f>DKQo+XtfsDv9W43QHV8* zS}S74);83bqR}O>Z5r5(go>gmHr6VlqSi63iv_ASEfogU2B@~E)L_(F*wKnDMYLFp zMU!J=MzL&BQMB;*HoO+Q&RoYBE^@hXa&w%yaOXL~Na4B3%yXRP=PQex=Nyh}isv}V zCmci+NWn~rPFnT-+wuJe<>z0!v&GLGVHd zDG2!EKt&CT_F@oel<1yu5=fP_;Q;_z3n7sc1|zm3B8#X63#38HCMa(#K)a|u9OLBR 
zhoNczkQ6egE<-voEqEE7ia5+tvNBBAP6ZS%r_&@WKxex7VqQ-;y4JsTskAA%JV6?z zZUh-t(_=-ngYDmiIm-h_7zn_^dL?41sM+|Wm3l^Nsj4Oh_|dZpe6iCLo~#OIXUMRz0{ktf>0o?_TbK={`rW96RZoK?{DN@!xu- zGpAq_tFGc82TXl;l64k|fmoiwM5uT3bDm{w;_r(#JGdg}6?`02fT9B@k7^DJb?yTBBdshtp z8eT^^xaTw^xh6zmaRF881o6enrjgd3Lg_*b_~UPW-L_V;c^+2 z^D>uafcw?%gb}9mDZMPx+ZFq#|6QLWxi3wd4=}>SqzY^#q?^u~@+i0CW!Gj*)}XPm zowBm{fbmx}v>={RhtP1|YW&8T)2r*82-~-+$pr`oaG)_zFYYhoIO z7A?XA^-(ItV~ZFtB8#lm2FxiW6@++d`-&!#@PNF{R4NH7HA4>!Fc9Liv2Rq@MdVi} ziK|5ehQc(X*$}h46Tx-W5Moz0$w~|K2Pjf0o6YG8*w0k^*z0-fetXoAE z(X?ufjT)$|YZ@bHsMak}VzC<47~2&^trkmL6jqB+8(VfRDmL0E(NtAMjcPV46^&@L zTA4tX5+gj(Yw%=yjgg)W70r!3X5JU&WfJu1YrQ27fi<46|A5XcB>2C>xOQs*V< zUuukDvcE%Rb+5M0K@#%25NYY$4gpx&S%)z;huu9~<|2{B8HbFvcb4dA@;w+rlnuMG z{75e;)*ka#7D7Dq0n?Jg+}n>JExD`VWXt2h#E4LzEM8e<(TOus?%kcO?Z`lc>f|Av z*`F}l*i7|?$}1+IvUn9Q5317;#;iongd$gIA``PN*w@0rJV%1@c+Ymkctoc;B56QP zX8R*Ou;H#AFs4cdTd6M;T$@DaHDoYxVrJ~k#=w*^DX4~{IqWeS_T0!0NX#IIOGpZF zc)2HP1l~TFPj#kj7&qk!kx=|pgNW65TuCM;MDOh;cWXE zLmGM_6O?UAI9XUo^}P)yKw?jr~RIw3i@9BhGDX*W9P397$# zFx)t>q70jT=Q#6qwO!#-y=hQMQv{+BS<8qiQNOsIX|H+hZkTZKAMh zDk~Le+QeAeD#>h?gJRe=$yzH=RB9_4EJmoU7SXn&MMbvJ6^vRnqiHtS+M{ZO+A77P zTSnR_(pZSpMxxtMqQ#9=S~ahr5P=_UzD9@KQCu;F-1$m9JN9+3& z_Txh(G9N#=9;U+JwYW+a!d}m2HHbzv0bGYhU3u>C#XLvMu*g#g*W<`=)SJ|B)H)cv zSOyV5yfO+$=Sqf>RSzd$j)_ij!4*nXc>%!Me?Ps?W7(1HED&{ssaKVfASglKVlRd< z>h*thR3ccZvQ4u#QP7#Klc47_vijH_?{U#8B1=03#V6cd3xKX#T$3pGFY5=KakJzY zlrl`p*MWEL@KWaMO-DC+eW5p+7~2$SGOTcMN&>rHd*a3w@(i{FCRx_vy7V-^|>?a6QkH(aQ;&S2OvA)3UjGPS`gXn8XXy#C}GvcPzpA+kWp>$$h zQhK0c2N$!JuH~InjtCy8o(-V$PJCyRo#B{z22U_HkxzzhYOw3s6S$n2lQ@>yI>uL$ zjFb>ZaEKQ8)^co!Yh&QMHk5;Bt1Yzmu*z?sh<>( zPWouhbyf%J&WtU}XNE%2VxXIdctYlPq-b&0uSYYcHA6#ZQ{2i>yWY)t*0U8zD3UZd z6*-IDtnG-%L?ro<+pt9{=9sf%r(pL@I1m!KIVqcKfY6jTypy0p$l} zioTi!?^cZACRk$=hTs}ErWD>osv+cA@2-my)>tJ)7@$pLuDWIjOD@)y){Z5B-?V&D z0DP=O`EbUv_D)O|&17(-FYaxq1xI{Fv4!IS4l(qC;Jk_AQhVh@i2ZQUJ}lw>?^A7VC(Qf)hL8LgWz;D9@= zZAP)8*s-k@8Yt9ljiS&{YNJ~eTVUIIn{lyGMH^IGV#cDh6{A#Ii%GVzXw@WE#@fYf 
zTSn9xjBV%e>k)q#5q=|j3M=xUagu`A3SRWdZqXRUsPIlNBK@PMo(|b$@j%7>H&V|c{Qb@=V z3h0aK!beqL-;-QI)f%>AME8NgCs%4QsQ76~b6l*vV&zH1l023-69)N&3wn+l!4P6B zir!K7ikxtS3j<4>5c(WuQGuf&_O!g-nEch|meVlhG%_8fP?^t*N82qBPl$7Ve{mq` zoqbUNt?HJClTpsKwI^iI8kbs^*H>gU-X33W^qk*uDeargVRI-oOG9~ceRs@kMIj@N zk<+VI`<>oI6mmhbrjjwKS*oD|S(NotE`!LK8MO`7n`eXv3x)o@yQtc-q1oF^UV%B7 zhZWpL-fAqbc_DT^o3z6t^kqUhxj&u`NK+bc+O9W<1yjj9aNWygz+hO(G7mnvAQAm79^^>+}+$nv~lilb4bO`IY_IqVm$1d_E_kNqWq$X%YJqV8Hc3jmaz7zRa6T_LJWz>YMO@fYP%(^7a2M|#Zp zAY)zvwe5I40^?TCQ1dp0Pthttdh1dzE9P(~Jaq~}=d|t5Q~9VQHw2-kgnOqH(c~5b zMT6DUmNHRs2r{>_a1H_%Oy@y!u!)?W-W}zJ=znm+O+=+iMu#dgRf%F{OWt0Wt83z8 zMMkk|F`~s;nxfdEilb=ON{wq6BGGLTZAGZDs>Z7rHltKq7_4BlR*g|uwWCJGlVa5s zXw?*Lv8xppv1=P@BS$N2+N+C2sMsu2TWGXot&3R3)fUFuBTz=xEsYVhRT{;k5n|ZY z7Wf-nMzs}1qN}e=1bV5RPc_Zl{WC%RwMO4p>OCQ7O=Bad=J%UXG$~cCH4J&j!4z%+ z61cxh$gjFz#m8Qt#hm8ie5M}8eZfoiNt)~ORL#f;c-DmQ zw;a8W-Uom1J|*H__upMwu31Ehd^Pf6!X1~MgK3%AN4+?E_mmMPk7K@=@3p#!W#UCEO=VL(ZKA#azIUKmlheI7o<|iV3XYWuc4W{WR;xCuuf!q6&XWAGv93QDh z`rFm;lLT0~1S^@tpex(>oO~JG@08Wo@0+~u^aFir-sb_E`4&4Sar8fIr_A@>co%Vm z1|rjukFijo*Vr?^e0oZrma*@9DmBeXSwmdD8WTklo{~H!{c(mNteWp*LYjA)ACm7v ze)~(8X_*8aP~Xj`&$r?R0Y8;;qBkK^!{)Kh&Vi?ilmW(G1-V1vHuHei@@Vy$Sg-U^Uj z_Sa|ffSPT$(WCH4J?uqdbu0m+SIc=z`oR4kO&V(bFTG0-?$0iIU&@@XGrXp#@HVyk zWzINI75uu&wNgvj&fqS8T^==!3F}220fa^oAur(%pe%bv_Zb%@+-DK<%#78LyyxEG zu@%Z0Po580yxL%^)mo72R~K+M4dmg(P;pCjfs1l1H0odM2jJGvA6Fw$hAv3B$>-3~ z-*L7yjD97158UrO`*5b4*zf9=$o7et12D{cJiZP%~G?AzMfENxP1 zEwo!fTQbyZVxwCX7LBYLi&(LUi)u8LsKHi{wl!kWY;79G(LqsYsw)MyEn+Na)K!dH z(Nt9!wx|uQ*;{3|G$s~cdgy{fMcy%o0Wb*pID z*ZuFhbk-m(qS0efQL0|RN3!#dd<^liBX9v21Z&#Bhau&OeySf>1fI51JIMf#u#$ff z+tus!`@MfhyXW&~eF@{{eSCL1{P&E8Cr|N3%>M>sCjj|sN(Wluj+ZMCMCW9fV8r8J zOym+!Qx=?-7Cq60hsJ3+o$WjoGl*EZ%Yic56rnJ+Glf#3qLW-KlxEenB^9}KKWB^H zcz5xguel{fu3p$xflr(ll@(~NOI2QcMa5Hn;HAO>fYfQeM=GB=2qjNMH;_6H3{*yC zw8~Fi@_h0P`Q9u!f>|_>Lf-;BVx9fP+C*#Y_GrkPJn7p*Uk9|L@&yGh63x-^p0SV3 zZ2~9_Qd7axqSUqnvqFa6nOZwPS}Bd1X>Ri1XM`Ai&=CD+bq~8RMGSZGA}*9aUES0I 
z*Ko6V$I(1{aMu#!MifPy549U)IhKKV1H;V54KSp&?(_uPS(aJ)lYOLNGC7>5XtaLI zD5Z3mvq^Aw*VbxqT8M`VSyM zk$3h87c!QE1Fc|jV078~fRSbmXT<4?EIN-O>VS~pNL6((;;6R1?%}BmN_j67go%Wc zGK`rBtUb1pW6IdwBslJ=YJ#CPq;gm%FQL1tj)jQmSE(eaZdxZI!*4j!k{3^heMAq; z<5d1FC(L=wKq%|V zmcx)WB-i14oh>O|OY-m2$TW~Pr;zAK+?r=NgWJfKC7?qMrb&t}oSL`Htgt;T>YsGy zQUIx6CPd?U&RXOIk4abudzUQxlt`oq9J8upd9yvF2a#W?UT0#C3OhZiiZg~VO!lG*Zi&N4mf5B%_VrBp%rPi9mCsIkj|DfpDeBfEReK3xtgQ>WF0FP!>_GXF zL==H35CTV7+cRjbV^%R^RT`shMkK6j6&j;!MPkK`ZAP?hiqUB*g4necsMd(JV-am8 zwlTFv*tRIKqS3b4)fm{=)uU0i*oqAi5v@kW2GLYi8pbxnZKBaev}(rM3uA4f(qivh zjbmChv9sOewCictpu9K3TW(!B*KKWeuSdyV=VoocPggIa=e6Bi#bMPQ?*Nlj0VM)S zhqwYPUH&JiYXtyAhwStQ`m)!K*Kc*>zN>1!2P?;G$*s5KwcGLDF3Q6WAmT)j1PKs= zLBY@jeMACF<|3Tn&_HdxcCFUwP1BsKU0YpW`Dtsmw%xk(_WFSc5Wt;}fFj3WfMNCy z^`M{xc9Rl79{>Re5xqnZ4S*wM!G|CcV2C8Sr||%bI1uqB1WA6N5kcK1jX@r#csvn5 zH+XPCCqhwmRUFpazH_gd*G}5C(ek#r)1_^?c3MeCxfYm7i85%wbpQ!2q=6C>1St>+ tD}YCcB7&J95^e}^m?D6R1`tKP-^daGumFMRsGuTcBqjmJFM!k&SH%DT diff --git a/vendor/github.com/pierrec/lz4/fuzz/lz4-fuzz.zip b/vendor/github.com/pierrec/lz4/fuzz/lz4-fuzz.zip deleted file mode 100644 index a7783c029f5a7fdd1c81fb9157f47247f277ea77..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2149434 zcmV)@K!LwdO9KQH00;mG0000000000000000000000{s90Ap`@wd(L~#?9Ofy(0_m5|LgZW&GW#_ z&Ya78-rITK_q^xKz2)LGtHpwhKP&n^BERZ%Km7Kx=k1vy$cxHQ9`d4*$c6?Xgg8?< zFS|3YO{|`AMNWF**uUTM^!IOhZ`O4ID{fplb|uZ&k@?S?b=_7!H0x^gXEEAnWX~I~ z&blsb8j^K2*Bb=C&tT85$+|Xr=)kH=Y+3bYeiT|T2z~QE2fH?o;BG|cpQ5no&vB{( z-|)|39xM5CGQ6_!Y`bA-R(WT3b>w69t;T~ierdhLFSxAo#X0o}3))cMzr=9b(@;xJ z`J8froc{L}2HBO?pP!#yKDYnbIrY7xgX~J{H{_Jh#rt(R_1Cld>qqBeivH!o|C5~h z306PB>L)+W=K5Ut^H}{f4%2)}`F8%6?DDzwPtU1e^ZqsLO6xmsJ$Zc>!--M9F))W+ zN5XYYo8_eSN9EM_4x7uaXTsGr7M-;IvRs^cX?+x={l9by{&_Ly*-JiJ|D~C<{zErp z*T}8ED(Bf%|1*zXUvk0q#o)>7Cvu)0`}$gXwVwsobENF@x%Ic@JUjE=`Skh#tv~#^ z?DDzw_vJhrf6;aHdga-0y)vhKZvDq{p8b^7|0%0~PJUii`P}+n<~)-wqncm3`a5tf zeIdJiZvC-L1`PSh%qR5sf}HYP&hI={>R*3(y#ibZ 
zm7kpc|NZ;#-+%x9`}g0!|Ni~=@4tVD*(kHw^{U$ux_H`cldrky!n<$2xfFXsbN%1* z)n9ng!l_kr7yh_*dg=G3e{aFOa!+u|jM|Il&R%@Ulv(qpPF(b(?_D+V266H1AKh}% zZRIlpH{5=G^_1`pb<_RglsPl5szzA$3r;Ng1y{=!^0xrX`R8@(x{fdVk#o^h964Bm zrCWqztdRd?zfg)LP9QgBq?QA>`2{C2X4I}32wHQI=Uyz6@{x@tEvob?hf+^%+46th zJ~7PHI|pii+`rvEak13X2Wr1#gQ@+T29+reEF&z7q}_ah+>~KS6da@xr~3Nz{@dJ= z{>z4UT7U4$4lH|VgR~C=cJ=xJnpe*tAV1lseSZ0Igit?ye91czKc|DAkPHvvI`1yk3XCXt-pAkX}|AR0@oDp@F_`idpUF$id zePm!Ln?I{@#MMY=1AA_@_RfG9Xr=>$QBUns$m1V^4bp#Z$QmAwO%Q$~7q z;mF3~7IndT4y7LYKkCoZo9WMy)8DAx0s8xDT|fQt%ZZndBL{<|PfYs6q;@rt!B__$ zC4|Rvz=7qtE-Vknu`c?Ij|WB`P8Nrz0SKh`ETq>Eqyx2NtGYmq zsJwsMR)CUOt@wqS+-Fk0pmzU&V|rgEnyQTh7Zocb-g)OVgrzFRRm8O=fB{1ltFsa| z#1xV!l#mx?lywyQj-&aMC`wv2g`P3EkrE_dpwG8lZAJW0H5})V*4V4-IrzW+cf;*Mc$u< zJ@18w15M-#2lgcS)rlrzthf`YBkA&F`U0eW$vYkVge1Q@$^RxXI@u%|W=;0^zA(yv z!8;ubPg=of-^-D5BjwaFhyt;QE>AdSUw$l-U~V11W~#31~8Z;3Ql$VHV*M|zu-#kR6qF}hf)v! zJN<1Ns6Q};<_k`>#e&lPE&jjI-*A-aFK>YU_~mB~pl@P{&`+_9U09y!z_G5-t$f@& za!qmQ0>Z2{hP>5gR08SB+2j}M`se^UTc5mWt6!+Im^B3_sI!6tXo{=eH*1QbY)$d> zD+6i@_t$+i#oJHyuPMe%15L5@ISy%?GR9J0{EZqIf45Vq{r?n)r_C4uhqm1(!C~#o z1LE-0FZG$JgWRa)6Rap>En($~Nwmttr}Id4b#VwQSGeh;`^Y0Zee`|$=w9;Zi%t%u zeuWkLnO~c_DK@$xH|m}XNjLT*U%3#Dv|F%bKS*w6`=^;F2k4UzGEa8WC+}sRybVv% z#@)zgIix)VILDELE`nSqL9Ua~sn{?dOQ?zX>n=*a67sAVuK*r$;>bqOz$dBC%+Gn* z_2nv|^bqEsA4X@YvaHsiyS0E=s_2=NW zsPHI@w_qtMJPsBnfjhFXu7x~#6o960Q1)92BPWkfD6se&Am&6itLgrqpTMb!nDsU_fS&>QcFV93ps<{K9zIFN~KcmI)NqII^*( zCF6%H#GWVY_75oF#GcOj+xzWGBwZ0>sC6&2?lM}Bbe|Fa)wh6d;ob~%+I2J-K&;jJ zrvBL-EA|`ip~NUnPl9FXe!OYWFjlIB@noh7lDkqY6$+~Q6o<4Q=+v_vcu=43$qY&x zL&>uxn?d-vecq>p7JT##MAbqWpzfClp{&RT^QSp$sKOuPG;UEmrKO#Z9k>38-EMuQK`({0lNf8v$NQ;#eYlFP7~$oJaZdRxAgJ zz3JYa8ie&0QR={+?LV7|IsZ^H+7bF&t6y+hTHGC}FWh=+8~KG=++FTOYBiQeyx%kuVb3;x*&TrJ zQGbWuJNd|u=BN3J-ze6rk8y>d}C)EP%*;!wXWv&$l^gSpUbYag9e%blDuJiHW z0)12CQ47D~Yd%>1 z;gE@Ga4D9DVNY{-h*_J=3}R1n=u>&AiV%t=LPN4h`&xrZMS0UIsw_uEef(s$it2l3 znh$dw3OrmNC2q(`MO0Z=YmO6>(Hek=TCC_9g{*&Kj*(0@Nd^KczHB?W5fc=)LPtPuX 
z`Eyo;@VH%Aev#^Q&%6Bc)mCEc){XQYZuRofYe@-t@*AUxj2`BrQ{eZ{9az3ZBr4h6 zL4-9~Dec3a9$wi5QkwsErTif2q*DGBX}40^Sm|lzS3F7XJv)8;LoMae-Qgcr$`7uh zr5=Sh&5NgHUWe8M$)hliWk^7L`Nb~Y6~RQ0#Pn_XX!{J$|G8rjzpE7c0RJ$ zg3t_^YcA*G-UYtKZYv+%2D8&@ic+Z?CqJ`^vVB2xB*ID64xM{-9QnlN?nrR~CEySh z?kv%h(+OY88RO$V4WI zf)G$RcN7I=&JB!t|IrL%js>xiMM1zagUVn6IV3blTSa05rGAGM`vZe`(Bf+E;~Yx0 zWl%y`w#+n}ddYu5tQ3@oIb?Fwq!5ve`9u(_h$&(S^s|q1CIq{6_hIc-h)8>j&GS1E zkog8t&PQgVVCf5O0aP)bsDsCvW*8hf=q3DKM))0O^lCE6B-KG!YR1WZgYdW{5v>|* z*-SdAA%hQM%NOK|vE>k;(tX&ir#KvGbzsjPJ`yD$9BI80ArxNB$1lZtGmdpE8iXUs z?$mqyE1#hH?PSa@X^Bus&`?Z3FTqNFY1D${{6P?UwDe?%aV|FeTku#)C`fWhaLG%A z!c+_^cB`Ec;jZ6vD7Cfovy@B*U4|nYy)FIPpDup6*V@_JH~tfQX(+`gha?95gjO9 zE|6ai6Hg3c##aON+hkS;R3@QkDw=7&vFA)aRsiG1UbdFwEps2n@~dN zC=`oOICW1 zQ^_#Ar!Ta!-mE8?JH>wXZ%wvZKADFexC}xjj{mVC?T;tGV9oI_{gl75rc3}l0LI4B zqX=0LAF;yhF?U4cF$ce5mxYc9vJmL-dwm8-mF2WK9ClF%^!(wmHG_gv}SZY@vPY@-yEwxkq zWr9Pg7r^RU1csh(ljAyO0XprQmcY?Ir(XXWc%I&E<_z$jt`|cimn1$YH`hKj{T14(~U@ZuVpCJe^F{rEX^Q zzrN1INFxa&P4lo<(g&tjW5}xu;FbE-Z#bm={zM;ty0874j^E3gw*T?u}Fv?F|(94v};g(rcMjCFGDJ@GhX{;mUAWsM{$RmEgkIDyDs>z^Y&0zK!$}L!o+q{_8 z1>e%y>UAJ_)J?QjHIdUgD>XBy5ma3rwaSwN`&>v=mJwf5RPG~hL?!5q9p+;rNNEY) z;I?H=M!=`d>!jf!V1^|`llq0ANZs>#936z^`k<)H4uWm^a|=R#dDI7cNiQ`nQMt`o zPC&U76iA&5OVgZKni@=_3@i5T2Tr3*4L12;D!HyR$gg4>;aYnYb*sHtZlcb2l~9H~ zoA}rTHiYDY@Lm@pqn`1R7YRTcZB|^13p*mjWQ0nhgCdMVf*(c%#Sv$8JvQS`Ce}K8folxJ~ zz@gNAX&&?z5itl@5z@v0wU&^HH|b8O&uyTp=n`Gm1JW^3`XnH=V(F0j$OaCH(r2P{ zNL>lHJJcv#_iBIY)pdFD!aW4})hMstN_zv+o9={K z4%I&qB~`tYwKx%aKcZC`McdSK;nmiF)Z$L4r_*v|{BXCE6y8ZpL#f3Wocif2q>-Hl z3u;T?eKTg^K=mznzt^2mTi|X75Lu1Gk8a>=Z48*3xCG0=Vt!q<3-fC?V;gp+RW6oS zGLaAkr#kOdv&iBStS7MqwTbA4iFlO&E_9P~Ir$MZ0F9M`j+?1%T}%3DV*S*m7dniw zP(Q{9X1OxFJI*pd+p7KyF~o;MkX^F1dS&6*u!W5Wr7v^f!*mj;i(p5PPp{~ zrug482ZGKdU`)K2y&CDAff)kyj(jg7It1k0c?)5F)zMh`=w2k<}Ojq=oKZ0K!% z!Bt2_w-|nvfCTKCZY7O9`(SR+>JJlPCc61y4ry=p^p|hLFCR+d>3u>nGA2r~mj{2i9pUNJUXrL@kY|>XNv7DC;;ONppu^;XR&$8= z2TyZUThrP;h*Y>zZNz8*j^);atit=$Q`G2$(4QPAHn9rWH-t<5MD*>i?{cA7B78a> 
zRGSxe^=KD?rMW~Xb|=)Q*UFar$b1WKC|t!$wzmI%LvvlirOi`EB0NnZPIT1nQtZYar* z&ttmwip<-I>9_6hR=r~thqU=QCip26cBy|cf=vjy>9i((SNy^ue3{S5FTa->;dGLE znoeN~yUJ|Ejc{fwGsV|{QOidc5W83&q5YlN)Ze%Fv8iDiROj5EX$eFnly79Ly}*G` zVJP=mp4qmZz?u4|#h1Up_<~;-B=h_xnCIoybTu&66|SZ1%qD*I=j+W);8Hr*mzw8# zYFtMUxcPNJIs5?3s?U)Lxiu<0PbX%Ekfj?}!30YK71hO9Dl(@H9e=^xqbPW_o52A! zuoj+>>1W^l>3$IF2j%vYgtnu1+`cT2{j@_T9m8Hd{K)iU+oo@+1^UR z2zOAoV9R#u7E~%XQMcei;)Y1r6_9+yE%2t%hZXxM+3XhNGq+%5#2PkDliK51X@I?T~*dH zPhh9IY%R6h?}oB_wHJDI-7gnh+m&_&%F1IMS-wCSLAZ8bFKcvf06O!mWxhbWI)Sx0 zhP8P;EsCWh0cyk(#!wwls*Sn@$DRcr!JSY)hr4Z9+AB)CwZEUxb#>3Pq@80}YQxet zb=$Ml%9r+Pi!y!zVPo~BXAQsLuW+|bn?On^1uL(o#&`oU#*JJ78eTBjHOG zk4u(sApU6~K3k>&Pc={?T%1$2AIdYp%$A?}_ZJXKZBz@M$XNWOkoN3RShP&XIxvpy z4vipky?iOWa)OM9kLo%^M)=sxM|J&FT7~!!ppO#a%T84mD|Y_5T#vv$HrvWy`J~y( zAKBN+|7r?Y`R%b>D}TcnnCzkwhqUW^PM*KPFaJ$H_C8pNa~%+OOu+zHEHn11p@gFr zdDNX-i3+=`n7nudM3>n$vW1zOX4jyYxCU2l{XcOHw5eIH!KIJ);~Es3Tm$W|X4k-- zPW)p_fiRE-cSA&vwMfI?bHLUY5E~eQgDYG;H!e(nj83!$@yQ zudBB7rSH24=Lo?Z17Ckmw#)c`Om0g4?Dt z)paIrn*x3C@p+Eu?$DW7KFghWo#^*N{PQo=S`gw_w?ub`{z$zGdAxul8y!SJ&HD=> z$5t%uRA<2L5qCl@{|kpwFUX@VgzoNe#CC@r!2OogV;HpAcWGJmY*JDJF`#H>npaALU~Xq_1+ba53;3yZN6gPARFRWCsN6i%afhTQ#FyNAd#nF%C0&R9C9bH zQt-%T7kY)^Y#7DY0ofkgOuPd=`g=r)UlT=GUR8ZASYZ79k0EL+tDP1a`=`bRxq*Ff z_q{?r*4ik-_G*}v)0)|SUExT!W+GDMj5EsF!kQ_uu#O?42gIp?S{bICBd_|MV^%tf z&P^^vvlfFmvaz}a%d2V_uq$Tkx~|P3ev&Q5A#G2e1hVXW#FQQUNoe-&O?{DrW%~yc zM=~=*nQyh=rN>koLcH=v0@oFA9|1mA=2@^Z`MTj`yz^UKSQ&NG@R2BL!%Ce6FYP5I z7vGKTXCBved}Jq?5s&P|$?hw0@?#qw-HH|NV(i)R^MIirN+}AH2W*6+o$8lhSlFuo z9HJr>zr%th`_zdeDg3W=7-(ThtkTic->_~!f8t1#`iI&w81%9@I4eCU+ka50{{Ugh zr3dM3`!o%9DeT(j7pe{VSA%1m(9+q8SiZxFrAY!?t#)7~M112=zXVs-Q(Wu~NZt6z zUYyjg3`m_Y;L%61QZ>v4o`*ON){SAM&#R8=NgIGh%3xK}g)Q5R-%$&e9|Yd@Z2OtV zpwKi}`kLvATCozeV0k{N(}F#__!Se$D@N7jmk{_mW^D18mu>yc5B!~Rf}c&znoOu81{|Koc^%S_Un|AM`c#_ z&I@z<`Xkni3DnkD!w{kM_UgJiG!OVFL@$pm<&d`i!~pRre);cFZoW5G?067`Qso;F z!Z+CSTV00rRxlpk5E%md1Aj|iavY&i^d%{9wu;6)eA+F92#{VVmu2de5B%OpZ 
zo@QR5+qcfd@*NKF39zU4=X?EpoV(1|*nLI#@6@#>+)ad}m<6JWnEgTRgC&Af9r-)9 zcO{w#pMj0sYcJ$pL>v6gNv}D#o7n#oq^)V-r5|9!~aLiAT3ntzl`WydqVZ_YyFc zGHu>ce_>}Kjr$%kVfGAy*_}l|`#;>vA#F>JhBfJvs89oVBr4@ns!~9-q^AX!g{l;| z%2D7+y6?q_V;$lAmj?rnjyMrM4Pm+3MXbN*T~G*C2l*I8bd*;GdY$|~f8$}n@erZW zA+_r%qAYKPe4utjod(Dcg6$bWCwZ>2=ZEO?5*nwB3Jwc>?zGS)Z^UGAJ)<&pP6siG zagiH!QOw2?%8az;Qq~IfQrSqCZX^h3!O9Y$8Y`27*+7pnfR?cn@)}_cf)WZBV>wtt z8l?WKk+(n$J#QI#z6PFC+dl(#nveRz+#LN@Fw4~?MkKe4u&g)JiTxK3oBj4P01}At zLR8c~aS450LSB~^kj~#(K|2q!5cx{hd9>fo)k}WHA?*)D&1A=$f1HzklmThMQZq@vib}sF?wd$-F_!Yf2 zgqHTKw4QetzhaJ++#kF5i3$9QpIXTMn}hV>d`twv9C5~kd_*)@?0lfcYG8goeik1+ zj*N=ov#?U|%tfSNl8?QQ5c-05TKOn+FR{IM>Oq!p<4R9!G=58cX7#nv;70EFiAl$R zRr%rh)=b<@;?hfIKvT8#e5lr8t_D-rGpyQkrfO-yRrL$3NZmxblo`5~ZX{T2`2zN1 zs27d4l0o=|>Mw{NtllnIk^1Y=R+M_1pOesvbzN_oFPx|5jkbb5KCg)r)Lq}TA~j43 z{n^0O2AD3l_UbxCmGN9_hey{>`TjyamZU1~f=d~ysrj8ob?7!mtJqq|2`Om45KtYS z{w43Ue#tv4I6k(zSJ%HJu#f+eA0`T=>aE3A^d%X=_%Hc+2^aN-*Vb2?HlG|d;rsuPOoz{c|N1n{Xs%05cUb^syG?Bd<;Gf(s-J5`T4}GYzf1s1brQW)r{2U0(s<71^Q=gHmlVv)9YHEWyH11O0N`)` ze)DsD>@^DV%JGamkD16rC=go65I zIgG=+KpVIXGP*oSxNSUv^tuyix@-XQPEuE&!w5SmO;~l?Xe&~WonuAX_!F6-5l+1x z2139~kF5$GU7=QN<5%1X693SSiq{M>S$)rJNR!D`nnNZ^B`6;oeH=#n;`hu{%0Eu2 z)Sw<5MUf9#rRsDtq`3sb`;MonMC=`P<0#ndP=;@yj5djo=^|3H=6EidmeIb`Ls<+% z8JifiB@OaN7v+FVBZKHt%K?nXh%m1iWkuS1 z#|(hCH_aEusKZB5(HMjI*D1(L$>5fdvdSzyR0pm~>%jD4 z?)?PI9Yt289X+b+04g2hI>^mcClpytNfN8RV@2w3Na-y{bA|uM=>et3I0qiR-T2Ny zKK3sx@u9ooK{JOJ3!KFP%@0fjrkG<3b zgLW{@=LTMToIGSfVD)Eb)3F-4nd(6T^CxFpk>)yT5ODKCX})m2I`eER;@A0w^Z9j@ zfX@;6*+9x3>|xkQEhA-1)bm)`63o9r3;%*Nd_O7tMo%s>F4QwmVCRwg(OED-*?xRl z&N^d0VVbK*GYvhuu3i`7kml;{&wo;)f;VkH@3+B}?`8IL8MB|unEgzjbAtNXpKNT! 
zE&E)Mcb>7az@D+)%4zURI%eJK7n~*P=7%{}GM3<1jPhk|R>93Seg&+&OD%X@JC<6FSjXsg9O<=%GWBX*eS3G01@h3u)XgVpMpsv<)BGg_B3G#G|)kCI6g^U2Gc4I2C0L|z=U2GlrjuVm9TNK5&#&WL3w=ww$4%g4XIR+ybVMBxu)>td3!cid z<;VVp+VUckLs6o3h)8rE`H=u{e=F=n)a>0yuH!ZZOhfC$miNfWx>$To&e3w7a*QftRB5Pd$+mK=CaeM!)c^^DE3gJxtZYzA+r@|+RLf_ z>F#VZ-eCI)(5BjUI0wL++s!Szp~a8^;M@Tk+g|{U-Ax-)jID@@{pEZ6gL73saQ5ES zf78op(=+Ngq#Zrh|M>$-R8V^jeeeVe(J_6nis=Kmw$Z+kv2Dayyp9TwGc`flT9w5u zHiKJ0NzLgsT*FAWm0x+3wOV2+kB(9S0x+5N=WdMLDCRGdUTII!bX=C`xkSI zgdfQu0E););DH4qFtP7aB}keZ@GJ9CIvQHS_Z@W$I{brX%B zV?;`@$4x8yuW9&H59)f_Rd(T6N2m}(1P`VJ(Ho~8!+DnjI zakax2wTQ~qPI_@cs>D#y-5z+^H{#G;s}7d^GeD{)6XynA`PJ}UzbyFUcgF_!waq?= z7EhOjvqksE0esRniGmlx|%iaQw0w3*0&Q+ zf|L85-=s~8PUR-|07zpta;mGAav7u0gafthCJt#|9EbBnWP9K_%HIxoFg>M8;g%5q zU~Eae{-kg)OpgMH`=tN`q_fYD!V2c17AzOMz1D?TpZS~c#g2cwGa$FC@A+TB!Hq|q zciyxB%SCKA_qC<+3vjsM1~}Ys1G)D}hms!;^SfJpQn&j^@?UxGUcc1lOR7$v`;cFX zz!`@XpTjER_5+PQ#b@pwy-VNZQ;N!ByTU{Gb$E!LTqxloEvDNoS${eD=S`}AOh-%* zO4MR`W(ij2me4()u`WJ(59p#homjcs3EMJmwo+@euAE*8ZWN&PnX8U|@RYLzDsd)9?WbFssGY#(TRx&$v$8$4f7-(bPL zIxJ%^J(sSLkN(9%qkI%f0^AG>ZIuWW`TaHY5-7p)Fhb@ECq3tCqNq%@1U&8c)?wvp z3zjMza>2@H2oRU=iFJghu#AiRJ^Z?06;ZGaSXnZ>JDMktxbYb>jPM97@#zGi^idgD zYFDf7<523ubdE;(eH@~@%&^p%T1%_|j1~LuTA3AW+W#mDE)k5kWLcK+M$hMh9fxzA z@`w+f1_;i;QXZC$VresRdkAhIy<7de#G%y3Sh3%OXcy|~Y=Ns5%MB&bJh|WxPg7=h z#5%%GERAHq)@hx+>D_9IL^EAKz>59z2?eyJ_C|zc`;VVyVj6=*qAFsZR+O+nSf{q6 zmu8p+2{xvYW=N6LUO=k$mtI|WC)5cNhqNN-?ua{~o=bjydP3Lb$=uT}bhW|_i5(6P z#?ln0cKC#@tG!Je(hi<5aRKnf_ZkWBRZ%arR@+R9*5Ak>?VLUv(E6U|Vm^LIvES0n z2Km9gxhHghN`ga25-(kU7sKNB$t^A!q+3P08VvItxPX1Whv{Tz#npApCa~$P}e0PnwtS?nqhn zB#v}D`1{{O2yFlZYx@VdGli8&*J7pU1?<_$KhhH29p2GYF*w>0=IQQl(>BT~A=o6Q zbv`Gk$_TMzGnXjgk|m!_B8Vk+)Q^_Y75BK zw8yfO?XldH+Npl>V-9Jj!^&lD{tVT(Ia}Hx@iPkcIZJbX8fQ{sM%mFYTB9ix-5J)U3p z^D4}*-73mcT-a9Otc)c>7y9{lg~N9^>ENU1kd8;UV1DhffKpTzwfXq?H}q-LD8@^FFA(po8GQnkJ0_Uk%Ya;b^*OB76q9IXA5vVLvraJw&QvK8}NYJUEMP zwC30D!b<*TKJJ}OeK|!O;)o_fr?b>O)9M@1bqD!%Kdi#ad}5Rh#gJ?+i9)HnOPdUS 
zvAyJZnm>?L?>!BXx1A|%7u{29?WdGtzvMS2e`7T9H;$fwphim#hqOH>*H`@V!jtez zvx#5gW`5cD|2Dr6ULib_%P%D-oO8IBa?Uo&Iqd@#RqRI|&1Ihr$AEpJKje_MweNoG9R4{%=NC-)$4mGJ z^5Ny z5jJl^CX5$HHe%qN1O4(&nY#;O`-IfBh{en55VJIm+C5r|F;8pn$;+xgn$L`<{`o`k z3*9u>x5y5`J{tIZn&pbIt9d_9sokVx9CjLgqG7lU)6`@!&5JB`K8tT?eiRMQ@$tEi z$Y!o-t^><+omjcU3F$XyS`pI5a|i|W<^Yg-nFXP!Z5kgh8a`bShWpEQhAu0Q?hfz4 zv5xS2enR+4>77b`?RGyOUo@+dU;Fh8rD#t;+Ec0c&vH~M;@J*S`m$2`rzq`DtszcE zi71JJ7dD?00#wsR__e$Ie0<@|fHMDVM|t!^!airy6);%20`_6afe_`JpTj|Au)>Yy z)hsSb{0QSH7I}$KD$)aDM4_7aPeBn3YN|yM2X0Ex`=NYdlj^qvFZZ~XfL+Sb7HA2Tb+z`ca*ymXr|sK*V4>f$osZn4>w0DELweL55C>1euL5{K~@i$M<=4y75Z-ZF;!GTiE5`6qTti zA`sOeiy<;4)jP2?+vVruQwIB+rdIf6yF?2YV|j|RJeJ^N#jqecwHV8bynKAoNblj+ z#!wBG?<^6ODJ2m7`5Q>soh8IgBEPHIZ@+B6lr~q2mZy5LG&RV_r#Nt=*%8@iyWAFX!1~7_ zhm{x~>St`irqq)Tr5;oB8#ttW-mB}XmE5&a3-M3?8cO}+D{fD7KMm?^`E_nkr!B*5fqj@onwl6S}UxH=9G6a3VMVIb(lJJDL6QDwF2xU3|RW;XT~S z@v);EUGc5K@?s~PEGEj+N=11_P?YDCh|2XPL?bo$`S?$+7UkQkD`hXl*lM+hYzX=E zjRC2}*Ld8)NA4uQqgFp3UyREwAD2rj_MDxdMShQcH8Ldur*_TEpCZqtw65&g7pfEu1 z7aCE%MJUy3dUahMapVzKhVshwj#yXtM-ca%;h=`OG$V-R87%ndNrq073tAq5S!^tp zt|}qZ%td;l>8h)#tVmZ?_cecd2wOL(zX`$it<+}q!H}_il${UO2oH9oWK6@TL1Vc2 zhTm|OzMWZpp&@`O?H^noJ2 zf!3v1+Jm>J8hgBzXC}naEg}ty`GvY^d^{idBKs|(XLD%6H{l^lwZA}m-sc)5Q<4hl zvejDI31Y=)+AmM&dZpB(?LH1xN@!wtrF=%Y`@_muSE!HxUnWYo2&Kd}uhXtQ zPRxlp-bznr_%uHspA#T*5$ga3n(eBLb@9_4T8y%qrzohwIrAFt60ews5=q-MJ-ECEs{MwIZD3c0((Lq=7@xnr%zRA~k z)K>4UQ1Of?f^zqKf8lNsl8aS z#~;o;hV97|*b}!-=aBa3!1lzemfZcYd|VW~uTVvD0DS1i9*$pj2NV7>EZ2J@$>VR( z5}#np4lG|1kPcyf?Ge1R7d0ZpM~YxkzPHZ7<>6yw$#?Yrfd3gl#@$2#f*z-9bRS-gz#NRv$Lembh)xeQMEZOC1ZziW_ z56d57Yxm%2C|J4wUc&52Ee|j4=|Kq9|4>v4roCwzR=p@H1*_MyA!#MF z8Y)Hzdp@ss5J4%V&-8N%+V-P;y3UqUVXMwIZ5T1K_jdtFk9NZ4OL|!;a!4CH;P=+f zxAq_Jk)2p?=HqxU#7*wsZy6xqzl(pQnan=VTM%lhBDOk)C8`oWUOm{dy$O6>I0v>! 
zP#GOHlP;sm^92_luX9L8A|D_39&WRQY>~Ig^y(e#C!996+|urE@(%Vk)j8zJ z{6lc^1emDjvItZOmaq#;L8o!7%fdD<9R7k!uo5i6v5pYNvKz+^hrFWfb718z2jP}j zA=r2J4G+2y*p(ZAd+u-;meLas(z6>LB+uL6>)FJ69StqVKp*3hs6`%e*MqR}+ZpQ$ zpN6HmPILHWr~1>|IFvd_!((tLj$&^g&djOb-$=|U&x1MUR5h4W63y92Jqicn5Y=9M z8#Sk@QwQj=4E$~MScU~Uv)vA<$8Y7*XST5wzY_J6TiKT5ZR-1Qdq{g54h_eSNQa{d zJ2Y>vuDiQ5H#Hys`s3U!R^d;8o4#McA?@V>^JjQCn7!{FlTtn&9L&e79fw;Sp#?NN z0l_rC;E*~qUimdTJ@j<+yRf5Noq^KUYtV&bUD>`1h7D>?oSn9JHgUdo>YGxJs;dGV zf)ktvif3)3_9Qzb@;PQtu3`4%n#?)JwdB7Bei+U6%cQXVmgY?=^)8WZ?10s6k!4p^ z@@qGVw$u1{J>lkS9lpaY4nDfvj*#@ZZv&kdJ}BqoqkZ}oU*mBbzicgciateUflx~v z=5l^r4VfSgh_)M?qO?G$tyFFycHb>RLpdL>4bD&&9CV1%lQ22>`S=Ymt?|(}IE0!+ zVJtCuz1mP>8taL2K3*3r=i?2tDlK1ET8_gypR&(J1{9xUDUTiw=lNPWizU_NYwYFt zWm`FT)iAR>s`JscV9frE?y4fd1VwsI<1GRfrCp}M&$S}t*8R%$*7Bwzi=U704MdJv z`A7qYP$2n%oj_GqA&W(Iklz!%-+~bNSsS%TM=InHdH!*pziE=&$H(zZVoCF?NJG>j zPd=!)5F`jdVt|JfBF0m=LS9+a2>Ws6$?b{@Rmziu*x~RrQF2cwtp@bg=~Aeuj{G3_ zYY0|addZW9>GF$ElVPWKx0Vn! zsaxfs*F(|~Z7{JtWN#^;#MJ98f>V9{r~h8B&-CiLzbPb~S0Ve0YFTDv5sg}@o&R74 zJ=Cg@{6)1DZT=z%Gt=@4c2r1WQG?MJp-vDh{&q)&6s1_|2e=RcE(<6F9zS_`o+vL7 zsuomxiVY4%IxnTNmpuCc`JlAo!#llBFWU@^6X@Kt%`T_Kd`9B`=@ee^L zYmD@H7GMLw1km`UHXl>I##lAS3A(6Uish*cz@URphY+#yXRk+v&Pqp`5+?Ylt_Qy@ zsg*VMaP_uGlJf(AzQ{gHW*i}5tkiB)*RF(Wtc_xw*w&wEmB)1*^vDcl!RYQvIbW!Z zb%f3$mUT@zAFl~kDmOW1C=2#G+zCyCvmvKC)>y;&0om(?uZcAhvwv!Fz|$Op&FE{W zA^IG3nHXo#a9=Y~%)5R3I&T%0k^wkTWU4Fu(MZMDO%?dBZ_Q}d3{z(K(VJ34}+O4~n**Iz@KGSfz3!sJ}<4?A8#4_z^sLoZz76sIKd8YxPHUU2SV; zp|`K6m?l2h7`kL1Onl{2Iiw|e`g|{QC+hb+k(tGC{(>~O7%y$MaEP7g?b*dI{|)GZ zosj8}%ENjqmhLFV@*Tz4vnTW&QTDm~{JM%Nkzf1v45iTFi(0SZ-7IF$O;0R7GFlb`$k24+jmo{!j06N$wX`u!sGmVX*ihn&yu-az_& zFsHvUIC9WQ`g4%}9IPTIs5gG9!?EfP9NFk>QOA(V8@HIhuPM<4Ui|IOr-@klwVz-) z?Gn@a>-pyO`-|z1!O@QJ5-fY2soAjLndFG^UVpKCCNZi!Uxqb4USy5#4($h%iH`?n zVSa5VR{YiyoW0qX7Faj43M*C4?x+RJRx$<$(ctc`)KgfoPhCY9p>u1c*Y@P$a&xROH(eFYt{VvJK*S$cyYbfm;K)XxeL;#LJalZ}Xws^h_f6U0Yrw{q? 
zw-d1^v0~r1@;^evjs+C1EJ}>E!m^Wzn7bcv75j~ga_KehJ)qYYmvKnD?iBR;)k)}e zm5E-LnducU$(v-z+qGtzUG{Bg_U%OO_gJxymH%@@ZUE}_5xM&N1{Ap`Lb)VdxC2Od z(WM;HDo*x&+8_t?%lQOV1JJMAmZ9H4n~8pi%h2z~Kx7(J!s#ywM2;2n$~)kC`y6n} z4IysANwa*w#FDEWnGafh8Qv4M@bRJ{(cK|-%8l$YJ^+I~$3hdSDMHV&E25uiQA_7! zj=33zGpB>ViWX$})bVleENW};Yul-IqsNU;wh)?0P~^EZx^ zI~?w=H;B>3M%KzkRtib<(;NZM&hW=k;y!aj%A<+UE>RvN$~4cs($mgIcUllCmxq2sN9nb{|QHQN7#;~9jV`tA=LHDnCuHteR#9s6uep2 zA8($|c$1HoW-|XOl)}8I)yKz+%2*oL&^~EgV>LLPLT|y5W+!BZK(Z*7?{vtXVcG!hc(Qqe4G zml@QqJ~g%Zb=MG0F{~TQLvXAk1e{!rm6;CrZp{sWDEpD+CuIkzQ~Q)9akU#e8~_x4 z?KZ4Lhn#}l2$@G?#s1(adRZcr;N&M9Rz%(bIg>9k+Rrz(+J*#&dUjPBVWGcdVWs5T z$$<6b;3)PB>T=c2XEegSelmx&UmWW%KOWfz&VOySnsf}^D`(Dm%~r?NIV^7+7#|!U z0c|hwDc`ps6ws4_NROkwCTg9=#|wv)N4vrWIMxyV7)za%QhRDoKj$-^Ove-De|AA_%EZjyS^vByAXYK9_QJ3}e@qsU_UA)4q* z@Stw}gmRlP@|lCNv^ghWJGvRm^)!u&elIbT_qzkzW;-Ec3Jw6_*UipOmx48-?Vl|4 z9IcltV_l(%CZYF5t(8(T5ZRYEwTZKe(zdG=@ji!KB~(kpux<-T&DvX#u~1~B6p4aU zoxWMuMY@Lopq@`uc1XbG?nLT|fV9coCG}`7_?*e6)JnRWAbqAkkRHx9oHG=${l><( z*@E;qR4GKygT(1=Aq?3fC>y0F#&)Psf9-YV`?Std}$%pME^3h?Ik3ak`%f~(< zAA3#m@uO47$0i4sPZx=Rh=zb{%oPw!1myj1BOuYVfQ;%#Ku&yH0eSYrZzUkl{+j|a z=@bG&a0bgqhKOwLD} zo^2M9r~SEd@X;0~2M_jnwaIwp3#Ub7(R@Qhwrm6u317q^ZQt=e{{5Jf$j3pmd~Ez* zmX8BOJ~-FrW~<^v!^nMr3&LSIi1>LW)b>g23;)< zLI%i9zFawZqJzoF^ggdv7_WNoNQ=ZNvzsJJ29`ukh0Hvd+-yM79p#SR(unEN#P5PwJh3 zbd=b@0ckr{szy3Q`Ns~ComXG!IT&(?(k@Zjk$Q&iIhIe)_>f0KRoM6xFNW1Nn(j;- z$xm>`$(TS@G|lo4k?}4OyrS$EN@>CaBfBV+z!|XE^l{KCVr$Qu1>Yr+tH`+LIj|D7 z5a=MGf0ETm=r;`Oj6uyZ@wZnfRi_&wpAQ=}-8y8Ur6vD5v*gcal7AUw-EQ?uotd1d z&Vw_Q@e5`s!qcMX0G0f@jO0JWmIDnMokH@DFv+jWlKg;_pyJF&-bM%!nf5n{!dQ30 z-JyNIUyWeO!SCWDw-6l05}{d9gry#$3H(j@WmwvTllwW+y0XOLNPk!@eR9_fhg|T7 z+aW2B=>}0s5D{BMbi*Se)eS}FboQ=9I-k*?Ev2YiPc6gJR-zX=;oDhQ+N#yU44c#@ zF#Q1YOkIwC_>6tU$k^OHUua+rKz5{gi*wafq96WsTaJEcF!Vzy0e-Q0&vn<;R6jI; zeh6HVs|1$4$&|o^KCgaayxMbXT0dO;14BRD^cv`g(@Qv{eR}NV`r$t+U!-R2De^`3 z(R`6(^zbvfMdN99Y7o7$+>PqK2^{HDK$U72ajn;|y(Y#IXMkVZjcu3l@#?{Tq7~_O 
zx1ZP$lHT*ZOhshloO%mJiyK=z)c^0+D8}(((bQ1Lb22irUk+nNHQUPU_T$Pb@&dy>ENSRSP&|Ae_F1Dt^TG-Bg^^t zr-8@`D<5ej_D}L7J7jfEmqX)g0+C(^A6-K;2*y`OQH3myufdW1dHyEBL7vt6 z+6>}j&Xs`dRtyW%a4-Uaf@U~VVGaonb=|A!%!pfr(hA8xzPh5#KfVST<6@Z+<7;sw zQ9|CQ;mNcP3AAIhuRw;Umtr~K!tyW~!)8E-NG2P3h~5cvBOjj+$qPGu{6lXV!;s!% zX2y$bgV(?#VSUZ(Wjna0D^(-A1M;OfriTj2oE8+7ISx^|zQo<7{qqD(B|Dt7h6}Jf z-8gw8G=a!mz|&4s13*lcZhV}k25@GJ>ZyIYp4!c?n_h*bO#yywJGNmL1nY^IDiz%@ zM76QfBt~icmQlx~{&mhAB7oJGsdKw8P3&b6K!UF7RlCUj&hwA24ah^%fT&+jHH#?086^G(^!^p(FV}qw1&Nu0><`dytlEC|e^HHo zaN<;I6uY~$`hQ!E*6jX9YSe@Ochx9PtI=N^2Q_*&QKR*LWNP#eqjJ?~$p%oPpO4~@ zcFVDW_M89b=~M5hQ|ME69{%0jfal>G-%26>)e+U!$Z~tBvqtmtjC|tf%=yHL0rQCu z*aprgKFxNDK1F4LP}6TdaZNg(xGq4qmXN0nrc7eNmdPY8)9S71OyXIA$VO+SMG^= zf}>KNB#6>nhhHA`{Eu7+J_-2%jMW{Gy=A0eZW8TXUXy;UW%`wA)JW0^YSbW~kB{S7 zG*Dx!aFJIHn4TR9T8q+KdRwHs`&YB1N0xN!rAfC>QI}Da4yosSpEEK*{e`fj&Xj*U z^KWSf6t-lN5^erMdfo$+S1{53g>~4@4y`Th7aG`bV2mIKGy^I~y&WSG<){6smu4LI z$+{IxS-0tYTK=JC=v4aL=y8Cqvamq=)4n>M6aIx zJEm6yu3Wvk?IqBwzj6^KIo9v@gBHe5x-|qO~}(v93iPlC}W0Z?K_soUX=b4`LPFanmb$ZW>(_ zrvZyXL47xF%OrC`)&<>FL`u{^iN80S+9*!9fw3}ToTGq#FV#B{u~uj{D0ayOTdspG zM9x@8I80aiVHJYzBPx*#p1v;qq3Ed)vc?jI=8Lg}%jANU^nED=hUh66xD-oC?VsTH z!U=g;U34jjw4IqJL3P2UbWin*>`Apci#=I}Tp7-lTNM_C$- z3qCkM6=*?2R?&`uXo0goS;q-#<4dQ)`wz_}O422QH2y?N%;(TRcz=En!+U#aU%Xe% zKRMnf%|AKb3+E4j_iHUCy!V!INDEj@ct2Q{i}zh+2Hs6EQid|c{`YwJq3Y|-FuGsH4TDfineO;z~pJi@PS<^dka=#5nlEvxI@B;Oe z0{JEvYkKCz)Vg#<^JIJBwPgG=9I-^Wg29rPfn)}Jn9GZ$%LVln#yIqVz2nfT{lr`b z(3V2>R(po+#%oa9zX$t*v9#4-7xks@(XGsDuww7I&NzgUSda3bnf^JD^*CfaZU~sv z(z&E9&-0*`tc4uXZq1DcGCpw-jL21jQ=aD}Hk<3>A3E{q&9DHPDiD>cEZA}kkKQaw zZKC4e>8zAdWh@aISt&aLp3d-|N*Ox>p3lSYz|okHVhx)CTXOVYsU>&6WA>>JjVeMY zmf&LtfidiFedmnyhrp!mIQdU2RtjffPiOrogDZk`9{7N9lCkrTNJr-fSea_Y$$hpo zMiBjUt<$1D^c{n9r1UxX@-Wu&fu5y0uQWzqte}v*F|0pg(Idd<)pKi&xsM2=S4t zjc5T*9^mN5A=sH$a?vn}?S_T5VfAk>rgg3HVMSQwp}v$ne+ZXd{`B=md07-{u&14m z{S?^Nq~TxRU@GIFTKoSY?OouTsItfLJ52&9Hr%w(2MSB8acvbbL0lWbHPwK3Fu{VT zMZ4HlL|h+>GzDByC`q8>Fp93O_*_@lw=3eV;o&Q3wJ#9+P=un$OPwL;w^XGq(9ZvJ 
z&z(u8Y4N+?-~aRB!!$GZaqhYIo_o%@=izP%KpcSzWh&e?WEg0f?4OvtnbJz-q&9l* z`vV%K#}VYN5FgW6gsfw=T-8*@b-HWy-gflsy+Ks7ga<~)`N=MaGh50Xj9%%O@^V() zbp(8)sU<#iEdllK`0=@`GDUNLwymhg~^atH|>9X{p-Tg3(f!F()4_Wk@O$ ztzAli?~Q!Hz^13^_nD%{uFs@UV%LKBZyusK`TV~E1^O*}Jj@=OZDypj52=5JXnzCk zHzW(XMmzj2$N-|=^I#4OQ1>`=Npyc5K&jQ$7M}`fE{SM90o6A5s#?T-3h{tlO#~dG1TC)RLReT;Zw$%kd6EqF zHE$SLa9E$fg69({yYXZOTb|$imT@Rnb|48?T1CqpvmtwP`0WHm6EJ~j3&PcTqRlrO z;#W{UaEZnqUTxR-ysHaE+XVKV@`1l6TI`m5(r{%lU5BF0#y(Rv&_)`rWSPW;-uRsm zh~-P>7T+_`jN&&W-oS#D4^CupTmC)Zy;I)C-a|$#AVcz&iIj;1T4{F3SG{iBf(lE1 zXZd!Hk@;ErJgm$7)67V@vHw^6<*pnm|6({AIFE=PtIkkpnU?hk`F7db&z#MeqRJJG ze<8m+!i?fC4r>4Iq5Sbg>wr8&oSb*Mf%C}Mu$;(|4Z+msIPCHx7bG^jm#wROGSFd9 zo;bpclzY`v#RFJ!Yl^=fZB4X>o2v4OWZ`aLm(@51$I&1h`lIKY;s6?qG23gq{K*xm;-%Et4tOQ3`f01^ZcgmN_ZpN9CsBW|l5+ z@ciC27*?mw7Z+>s?#>{R?QhrXu1P7`HS{^A+n7!7i5y0^bXXA)7|oA=C|^b)`HT81 z{w~6h_%p(EgH;ELI~7nOmmn<+f4AkFa=K6YV7hG}4#Gn1jlV7*D>9?_|HuyyGqbdq zr-b*>Q_Ocb8n^|xeBkL?tA3N90WjsI^Dxy8D(9U0eezNG&Km9aD%M z!Z-bMNTpICk+MJkMLYB2<55z%kUzM|M*cY)@EsFxM(rYVET{*6PCx!IL8y=dTa88 zBEl|N`E4Tl&EDUdQGCC=)MQ4=VgPL*Jh!Bt&z9|3EP0dN)f6`oHAcid;@(~9BvN^! zJ!B$cd83_#_XG}U63$qMb~~7iHsNzvGfw> ziLeba?g%?c^#N5Kj!1edn6vec0+o_`chzsTlgUMd3yIL#N$|!@4}PrP6FL@9jJVNK z%rVAJj=jnuB3aJ6)}RKMfDn@9jhsQF^wdTu_DQELSZdz z-=Gy^(#KR*?B`>KVvr%QQY)4Z#m?TyA^GjsIV3;Lpxmh%DrC?KZJ-#JpXSU+c{#y^ zX`Ff{3AY9EiO@n~$48Oa0TU5Vr+68IAFBh`Pl|?tp7}%0&jVble32|5@=oy)#32-J z!)q_$5K?Oe_zl($bXkx!Bf?EMG@3(59QXRPJQO^^;fMMWLYvK+SeLJKz+at2oJ7LM zs>Twb-WBb{8%KEXV-22=8aPM{vEj=d&IYsNH+1~()ON2q7C8>OO9ZSWdD-U`eqCr;L&Gbg14$Bq}-}caLW9=&gh>ZXNxNegnA7Zjs^n3 zp0q%TLE}f9!D>1~@TVxiB|bVyZa3;^cG zC7dxptk*GGj{g((OaHkB5XRE7S#NPj^g2chET8O!Z$P;bal^iX6y}Hj#Uc6Zm*DI4 zvka}APQR8v4Ffrw4#cAl@~8L;rP8wj*r(jHyAg*$sUg~~x8)%ED%=!Uv1R4o*0UW! 
zsZ(A&TU$t_&iYl^Iu78fvsgSmyr;@b#4=}nkbdIy)rgR;rngj4#8{4H& zRkHHWqs%D&jDa{d#OM+RZjVTo;TeX3kD?WBVTbXAJ`+-gCui_fgVh-d4+%G6=^=nW z`!)cFd0%Mb^*KC457qPE4)yc^4E3~A4mBNyn(%7Z#;doE!@FrAuZwPevx9Zbv7~76 zbb8Vh#UM6a_G_mlpdU%9dnx=6Xu}u?KbN>Z8VKp4N`Ul_r|FPZ`L&wFSznzPd9j!o zZmY_bH#(Cb<IVn$ zb3^Q(fKSULD?gB*rpv3w9DDKt15D%f!hbxIWe^`@1H$Kk)55t45Va>x~p=MyQzL!|7r zw`L=}Y3$`hx@f;Y4++og*kuR15)$mWVtEGoC@4rssPBiV^XxhGh-!JZZP;jwA#xFsaEuaIn|_yBcgdJBwuGs;K#A?-*8f39;_ zH2J&c>cW3G8h9DN*g=bJNmP*5u_&Df&^}cq?;(Wp;cVyHi7;+K2jwQm{TUuRNl`5+ zY9(BZ2(|$G@(1>q*O9{v+iGmLbU+f(sr1nuqiy5x0uCKE81JX)Q zDq!hlT}^C@zo(-ip2HHrNHaMt!nl*|m0+(KOMlZyKm`%!Y+p#kaSNnuM5^mlbrd4i z2NCL2iMwv;0wPU5oqR;wWGL(>?#{}QL|U(^;}Zob%PpC6iIC8oNj^GVl)`*?eR8^R z=yupOJ}rDmH$p64OtOdgJKzVjJHD3)jmmHO8Qu2KsRxV2Qu*0u;T-HRva4Ww@$t(> z9(ln_9FjNwoxNgr7}wBO&(T+G|K-ai^vzoOW;0!Xi~}?~J^9Jm6Q2+GLK3fVg zTnL|J*DofdoIL)k_KQ~x8DFpgJDeN%ek& zP$!Wlk0I`#mO6;Iz7R@xlcK$l#N@+7LOJ(Oc=ZO@+giixq2Z$4w82pn_zK-VM8Bdg zcNF7D4H*Sid|KQS$elOtIg-axq|4i{McN7zYoI}Zk!vM__H&(c6y&I1s%aWtVHg+!|EO!(G#U|o9fIg9YTbe`n@*Ia)*1n9WgjTgp9CDb>+qT&AS6IVrm0R|=pjYl5a}kBe3ZoM ziKUki@kxft4pMy_A;eXl<70gg3()$rfz%7t#RI#Iuc3swFiD{$FLMvH%S&c6)}~ac zfN=yI&UhPttDhQOoK;NREjSFBvNn}e;7yIh-HO9kvLol~dXsil#t562E+L)DTmkjq$_n@3D0ylHNQ zTDL%$TY)$IM8tcGiMxO4SlS$1Dle*;<5))57$L!rOY2;1@pl#o<&GS>r1In+pMZ_O z>nFn&9!b%f{SOY|OiHD-9%B@J)*$$<8UWv2&vHnv zd6GkNC2M*yF*NO>6<(KMMYUJKBsFn!q zuW%b)JJLEJ`%9@)U4?)zDs_gCxJtJPnhyA_em(2+IM3P6k>nTHk!?&9e;)Ai_d~kn{UmES=gU9VlZ}W*|c7J$JVBS?y z#u%SannzSmJ_8yVffk>lgODm!;T`%%@B~*i3P;v~Dx}Sz3K7;0IUhcel={V49zTA+ zqvU-LSgd=$;)5qVSZab0(SLMaEln8f7w;+dyMMvK|CkWETADwW*RPHFI=TH$ne;0A=xS1G=2F}0{OZBWg3d9J}a+8a89`L!Mp=i}OjV&E| z3u)XaDD6Opq{|Vg!Rr9y54m?KLh-Gl<){-{HM^qmSBYp_b$uQpVs<5cxLfgGqwz-v z$fjyF+e02FyL%CIWdbus(MAME`^5wIpU+Xo6sZ)?PilC^;cp>iy{4DLp`G+F*A}GG zq*+&bSU1U>bk+o|0fC@zI){`y2Nq5yTeBv_c)NVo9)y%%;L5@F#KIgz+`Uyxw>Y(y zikYKOauCdFmz_F=MU46}_aNO2t9i)<5R@*wComp_zh)w3oX!j`#2c)H+eU;Ntjeum z%d^wf6h8zPeZtQ89wHo#3qG;b4oOUK9}6vUdLJ`UsP{Uo7WrT|L!BL*&Vd=D;uU3( 
zC=CHW7>)iwwCD?zH&3c6FI~m)?T1OQ*W(|v$6wIl7iM|iO*ARKdGv&CgtXns!F=+O$ebX8y}3B_~8DNif}?z@z(DUUbW%`q(I?= z`}>v36RMg>_pt$Tc+4Em#}pHzZtIC5~i_n1&;*eDL@cIQS8K85g*g ze%Uno8lS+B9zrFp`={YZ%e3@XD6KO(Ytj&!Db8sr^ouu8N_4Q~1C0`W%Lh+-uyhVb zDbb&tS4)Ajkzf2vvESW~gP9y9MR%Uf@!g*=Qq%xJANp|UL!^C(rRPbrUhFQNEmX|y+i>cHr5*oczP7h4u#Q1A;hG}b7nLDP< zFa#g@ucz6k(rNZNIKDm3=J}An;Gkc)!Q&VHMCbX2iW0SM!8GB93SOK+=XnQjnvthu zs4&kKRaJc~iwPf7hVv|o2@Xz#Y0O?;!DcK|;rm&BQG$|wP6>A-B3ZiLXRPBQA|+9> z3`dx$Sq&@atqD1*p*&psY3d3|3ymP({T*X032E>+IgNUf1r44G#sm?cSei#f&q7-D zd7ZlcDt?$RsIM44-vA z3u8AFah31B6PT>q*rS3y=H96>h_$`86nS)HNc;4`VZ$e|#r-L$eQLbSz|l>jpLW3~ z`5)hzkTUhyugasRb3@&qdx%uS^3BBXDp1)AF%pw)n!+hyB5G7q#m7i&pDn?z$$bjU z?UMf_t>nQ-2r@VFHC~6k-s{NZ=r2;^b!5rUWN=9Z+FT|TQ2qtXjzFwrrm!RrB#FC5 zA(XU#6fzt-mDU8Q^9vA4{46!x>Pwc;x2Zasw^wN>ynWE8$KjJa?^_dsvt(%eExB|^ z+m2Lom(4#Lj1GzoSK{g+6N>+9`zhyViF4rH6-nHGR%x1UtA;WCDoTEnHo5hb^Zgkb zY!Bdb6q8^t7r31=S8KuI2h2Rg(@F3|LFE)8S-!g>84T?%2|M2;Q(ep&Vc%-1(C_ zWE6E{g8QAWHq!kQ5xU}APJyR&r)ovAgo_e^MGo8bz#{g0nf=2-6M~FLgYh?gApYdv zYa}3F8Z{F%Vmd^UGyYc$~ZW1 zeg-BBpey@QgwSyO&{Pf~N<)UHTqr|c%O@eP<&*SpVQ1_>rmIWX>uQf39`5Q8ev18Q zckL85(jrpEZ>smJs%lbW`1)uU!RN*n5d1(43cVpyyKq@+7q(kA!SC`Ge0_8Qp4hTvB0VOqZ=!V?yRbdG zft7D&^(REtD2wH)!VWS!(QQ-ocy?Y!63W;nm9*{Zq<=AMhqt~l7}Xj= zvB}@=Ga=9g8R5%IRw9I@fyhSbt$+y z1lu^qzvc~)lGPw%)n_}I?*ipA?}s-}wIEG;z68E8$|aXJkfr_1Uxy5HQU2lz4k`p0<;(n~F3W5N1Th!GJyotd}tCiyRoefPaNF zc@z=uu_oniak-<=E3TnXPm_r16&{8Ga4jXl9+1fXmK0g1Ya%O5ME2(ul$CD{T+LKd zQuY~>L4K*iDN{o9T-|cf<)BIFx-c(s#t}A*k1l72nKNEXB+ES&>=2X}exvGHw_#&3q9fv-HD~H`I2%KaYua{b)#nK{)OYcG=jk8wd zAmyHZCIuuGo>lI-M91#HA#I_URltl&vNVoP_0xLfKJ9q?)A-bDgC+W>z2_S~ol8I6 zsDGMgO0^B}+{ftkp|qj$xx)I6vi)bsv4JC&2@VisCu(Oyj9v0Aw$e2bj#mAJQS@*$ zkU>P-Sszi%HdcZwKTF3!#O!Q%+fsR$h}lK&=O7}EJDom+dQ#K~tc|;+@-iZt>u5V4 z1Ha+kvG_eAGXBH8qjCd|c;-*#s$L<&WQRPt7NPhvFd0R)2!Xk{{P4{%z+DH^5X6UN z^xeQ*4Ji;4X9(d)bq)tQgG-Ximq9Y`DeX`glOtABi9H-$@xHD*yZarTGzppoD@jl# zPl75jT6E?fjTY6f%;EkYbR0=IjQmV$vPetCnRGI7_#to=vXsKlz?}>th?~!5_gpf- 
z4qg6tLP%%tjkXGs?Qs~W7OkO}jTQzf&0&G}uLAC%PHGK9+GBS<}bzecu4{A<6QV7IIO3b~l5vFuvVT zKxg}yN*(|^bfK!o#Z>&l5O9P*oRB|hPH=XTFocDhZ%B&o9cHHJZ_5YhN2Fqb)y&rk zNviE7NhpIROjgx+Y>1wGEF-2>eo%`HTddrfY7eyJlA-(`gj2XZWU8vc?Ln^HZ9?(4 zQ_kkIgM7`cP1kBt{!=v#n6Bh5b`J1Emp{=W(-sG7_Bx6~nn~PwHpiwML}H4Ch}rHl z@(>Z%=Z~ViZ_JP@mq@-$m8i|6yNwhy6L31l3zgbL#YUnfR7M1&j2`Q7{6d~-UlR(xGJCVj_hD=1v$fOLj zKs>4b>DLco{|%eE)Jep$V&ZPcp>$ft-GSF&&6&_n#6=#0-#_+|hoTF6yW^K=N21vQ zS(Sk>-;*Q}gcfhoi+@I!uAK-w2t+jw_Is-Gi8RAQg8QSyy?gP$iBRUG9eIpr@YG4f z{Y~YaKGEYL(oBwTmzj34$zRau6X?r1lQ_~{dl^Eg%0k>7RilVd>STcm4Cr6Z_JRiN zVG}#AKJHHd3z3$WJM!bvq2ORX)=97|`;ORBu3a}T0yeF>-Gr32DG7%L%i~mzT+cMV z;;cMU)IxdANv(2hVA=ml3QArRyWO>w;mg&>NC8+G{cFfCc+)lI1aGLtk*jV?si9H%>U~+U1t(lXCGX_Fn-vSbt=B``^qbLd&w}jxGK@ z5wlE1!*x%aPWXhpEccm$TDAR(=K1K48*OR&r|R0 z|5(kTBq=kkgkrUyrz%P>sWx7Sf3<>{HGgO*2By-Zs*P?4@3LYUAEw$59a+gC`5Xq) z1K1$poJqgVyBk*4Ijtt7)b|DYj z`Mf2}j$wDI71rz(M)SI#@8^)bVu@yiha+VE%3h8IDv;$WXyW^CQq}Ev1&8Dr#!7^_ zNv~U}9m<6e(+Zfc7g#I%KS=d^X?lP)0KA-h$Nf<3mrqkw%r87(bZGIiVoz^Nbz`|t zE5?jv#<5~62Nj$1fN?WW`CNWvDTn0cRh)L|5stFRl(G_lHX>ZAiJScay)13xo#jyM zo2{ukMY*39TL{HOR_wsm)SXgGXP}Su)n(jA!;pRtqq9ShIXF*Vw;3TNlY%&%Rr}=! 
zLpAXExQjOWYJfw^+GMlg_HY4sbzz^X;!W29<^2;W zI)OK}k%A5)jlBd%W-cJ@hk>4|q~JIac9SAXdRpJ2q~{wFi(5$1E}&FC$07>iG7|eA z6A6BA60|7q`pVS;9vW6{(Fr16w~+D{Q&>mwmGm%-`U>j2c+>Q9AKp-hBQtN~HHyB6 zFW3b^&bi}6X!i-VAT1H$_%v5E{=)z(lKPcJ971VCvJ_1)OyTb-e!jjP2+*+E@c8?7 z6H@*%AjA#{9;u+jr;HLGaDZGySQLOdlHNjD)nz2Tlf1^PM^-{ee#;wKOn|B|F$0p^a;IR9Oni&(;mBuEZj08Wfpz~P3`lE92 zsrp)H=xe>9uUG$D8x(a?D$3dpq zxb)H2a*$871YgTRywD03Ni9ISX_-zAAzmEaLc(oI~>^H(0{sty`7y0e``+zPU;0Pd2ZG$K8*E{l;g!eZ*1d}8*OZ{#38CR_YPwSMVx zl?3;hAalaI!2ZO|yfni~+(&WnDHh5wj~AMSSbH3AJ%}pZS4m?>gwlNC-=Ssk3|=^c zZ@5B}0@%x7ot+5qr%(L*`Ya0ao`5~rW2!W((fISc+USbLpY&1bNANl(_`sg<`Y6t% z^k^dZCruDyOpO6cl?7Y%I*O@CzR(BJW8OB*2LOE)1-eq1h9h=f*e1kE+7FlDtp`x$ z@2{3dpXQewXHc*=P_X=BmS1!@{KB=!FWhSNiGU_PSZntQPcXP&p>X@%pC{pdmcjiu zpHL2P7gD%AI^4(42e=CpaF^39X9V7(0blNL#@|Q+FMDQcs?;^34Ewy0?(<)6)mXJb z&WFZ5xvoW*B|~XUM5-V{BS-)-c^KtZAxLQZK_b1wmP3FtbjuwJ3D@IliVr6evHFB4 zeN{%>y*PLehfrt_4$p<#eh*g?LhQo)avU*RL(!^XL@FtcH?TA;IFc8t!=Z8dB?AP` zsqs2;K@skgMvtL`F7=1^RILaN^NBC$Qy`3X@VW%)6CX{^!J}*rUeMhQPY-CC(K}c+3Ss|f`F@vL-L)hxi+IMu7iGk@n&X=Yj(&B z=MNbd(ZhIjF$)+$`Gq^6rTa2d0c#2j4cJD;)SUiNs>$y9fRV-9dJhyk_cKE=@PxQW zE5=M;{(29G-Y2s79J>?T& za@%X;>59CG#AFLw7{Oj^)!CFWV~VO~QWOJc{CZ9$?$)Yb43eZoRJIN2(X9p4ls6qQ z86p$7qRJI2+=2b_y)EU+N~!=x>bg1;f@}`k8AQPzYt>mq*Z5dnZE=hvV}b`eYsQdZ zt(mAz#9h1i2zXIKGy-|F9@7}ifbDvPp$7IvhQU+pe zRhkg;3$b`UFKqJ(F~9IF-+qXx%Z~XAe)gvy@k!=%+HSDNv!r7hCE8tP2%8-%dCv`T zz$1Hj;S;a0i>I4!K}mNfjYft8JPq$_q_!seh2A zIhaJ$TDT16-M`@QGjNntT1o6j6A2%~p(=MqwdAO)jhAjrBksDY5jesH_iazFt;sYVC@nA%d7%;|77Q&d8yQ-~3|AbV&5U%@dx{TE(Z%<;lb z@OkZ2DpggTA$ik#e8SGS43JNPb#v4&e3w{?s=wf*KmEuIX>z(xdZ330JN?49M5y%( zKl_B9_}fHi^b6nlg(LBOKH(S-cv3D_RW;a?hQm~bV#~wC$G%JBgF8Gpa&hpxe|yu9 zdFbRWRY~wf+EV*wPzVU!(_F`trao0AF*(gA-QExJA&AiJ6Mj^DVAb%G5~APfn*16v z4>tG&nHRpGhtDyeaM&j;RC&DN7h+Qzt1SbcL0OXMpb<5vDp4H$@4xho&W#rkjD=kk8`=YV%A$%*W%% z@U5dLC8cM@m^}p@=`A(GljU$^Sa6>y*l5R*-1ja7ic8BKEBL9qy`a5)AN|o*(2~B( zSjq60WN~&I&OXSl{K#C!5sh`o*WEZ|a7vH-?9IS?bnh}$28X4O??_>SG|%%l@8FQ! 
zJ)cAJftv>_QO~DU$3VaGnU}-k4EFf^Jrh#C=}iq+mU>=X&GHKlP;qGSKD%S%L8p-k zdQUB=rtSdkuEn9Fn)^@ajx-}n<8=T}qgj0wLu?}=bP&pNfEXuq)3wG6yXdhT-h)^F zgF|Qmtw*WW2@bDb&4B~@q-vsW`_P0Cis$jdZc@FC!cbE_*~!P0;Y6+X3EN2Z3HoyJ zX(ZTRxOlg+3mCHgLL6-1SXm;?<4EvCAr8I;&5m`hBx+rJ3|*3^7p?OAu+= z$yDghO4IN7z~~SA^1lZ#)_E^9kH?c?YWo)DQ z9EGy)CfF4lM;La+d9?q^g+?La@mrzT*;`UCMSNPZWl-$yTR9{Pf8>xHVC{10sqMaT zttKRVfl_@<*TE-w?q(BG9y>W?d@;lg<dk5YAmOGZix@c)G{eEt0e99gb)c|bhz4BG z(xz}^^oW(m|4}sxEaF(?+=O4UWSMKc8i3e6p*etye@%tQ%j|(KW%IYmgp`B*y1kd- z{%~MGSaCTepe?Q@UTCCZcdQ4@PG(UeQI+AXN#kR(m8iAxLoAas=Bpb!VLzw&q}eKD zcy8olikT1Y$9#1YFcA&4lz*9ClaU3rgSGwfm%KQF{X#Pz+`odT4JCN%f#G=7ALxnM zUAqE^;A4ng^VnS~FZ9A8yF-ESDHN7Vv<)ShA(UA6ZW7!-oN%r2k=_~;g_{9dN5V}@ zf6#6@HE$+_%NPQqz1ox zTjgzD962Y!llIO0#=Hwa~)`Gt>DQT8XbBs2~Ag?)VNh?x%_!hH31hO3!nOgR=)jUg4y5ePd~~g>Co`SJ6uhM zibn@j++nP!`U_6@)AuGT`tgRNOt$D^q?yCv_d!WFTlEQwU#N>`@v(i@X*hDGKYbq` z{KU$u&3^Z%ixt27Q@lFNA>=Rmloy(Pf|9tqKjbgi#|L{k9R8N#E!e~1a63Y13l%|l zFz#1Xm5b+lYfk55aVsB;_7Js}r<~{!f6)$~(B!B5;y&KJW3i7HntVblohY4q{lQ-# z`_t?Ez`dT}0`>9nexZX89>je0Ha3r^^RdHbJ{Ubo)MmfX&IfzUm3!hZ`oqVTTG0N&55%;cbTfb`=}ubU~ET_A1k;)SCyd!H%&lnWGE;@|s(qsn!vs$MNQ()r*? 
z_tHCjLK79xw!xyaoTx_ z@1c7%;qd1x=C@Ypp)vFr?1)OS4RT>~I3 ztSyPB!sr11H(2!t4gPTHyZq8vI_#N}FH2?PPK#d-;|?Ccur79|j@_R^%#7Mr=K<(hcHaM z@Ts!pgsRHl{%&afo)(YRXtE_)8XwTi%Exa2HCW5PwcnpPP*N#tCz9>C2Ut1@`SG{e zVQqIc#WO?nAirfY{WPbskfF(8jOyIpu>I~KFJ?m2#@REI+Z=k;PvE+#=$-CQi`MduLD*N5^cl^i8Sug zNnn~1_?pdJY~L^pT~@A&#C%sTDcSl;N&0sHoqCWJ(rB<4VfOQj5?&NIMuCa&4(6^fN*Qc1tNzA9U~NA1KLE6cR7 ztT=ophY4y*lqZUHF@ajWGTArmC`lmn^~yj%1%k z;cE+6Ls1-pxOHMlK4|o*ZN$A3hn}V%Z;8ScXD7k2%bj>rNd+{FH`Fptm~xbs8*}pzEnvqH%ZmS@xT;zx zTAuC9Md4`GmGvdJa6~L&)~CUJQC)zr%5|D}P^jf5a}qdxNPCyh=JVK&eWA7S30`p2tZ z(~AxsMo=~mey(ahu7*fNRr$#l7QL#f$q=;S4R+<(lW^(ebVZen zK2??9GMkXHup26qR+ z>ln?x=~D@ba{CVI7?X%ypJA4_M6Ui8JWxl|ZP~qxeGqxN9~+63-9C!#L&(lkNdGEv z#;fHwCu(QHn?|!)xK6B|1_+w*stGCIo+94v9o$g(apDNMCDeq&E)Jo3&#fGe_#DNd zroh=mBz9n)PeNq$d8>2K8d6s8b$ABYr6*Cm{AIRK-E>bpNGn;2R zyj5s6o_Kpf9W(AyGn{y*5ougjRyM+$GOmK@foKLye^W;f2j%#HbQG)S}Zh$)Q-)lBbK!t8WI?W#JYqrc`% z;N7onwN%PK1l8Jg9^mTex$t=KB@nQZt&}ZVAB$3H73VlADDNqjJLU`Jj?rSku}}y&^2FtiJB8(rv2sIqKOi$- zqqK7zrIgf;CJ>&3WuY==vSVSAV1!98(|7u{n6ja0;BGcG;b>rn0Uo`m=yg=kNff<~ z`N2jzC2gYDv5@l2qStY!;B}0ZCl3G#g5eUQf0Wj#)YGrVpCP)V6U!(rzw)99DQz&t zDag^I8zu7eku6jJJ5yWU_ASh8!RN@6-#In_DRdV=nC4)wEii)wdru1t6-7}Nc~G?O zF_$I(K|X{}9$S~q^d*cK-< z{%7|wiFith^n)Qw!U%al(8ZMXv+xxDB|?n6rmEu+4uOOu)RMCi+6RV89&o?zu4kW5 z0Nt1~N+s0?phP$t7)7MF5z0rv7=p)6I9l~*h-yuj)nyQWG5e|Sa}iOS!9N}1{Ds?b z_yb)PPiY7xwq`o!H65k_VVpP{ot1f(CSO&Z$j~h2GWzk0Y*}} zw!NNeuGc3-2QhHivJ^( z{!L`-cToP9El7*MN8F8C)ci!>Y+gCU3JtaOpE$0j&_fS#w*)58GYmF;nZD_@=rYD; z<=&tA)%b1_ZmRlh;D>!bF^W=Jp8yBU zQoJoT95<(GeW-c+rbivrdWL4ot({qVKhtPGo&lFWX+InGt&{GXhcD6iuNxw%YLx(d zlApZ-=ysAG11q&`=C0+p|8SugKM`*!s08-V&^S!n@x#z55Xttz-q8q0#*QJ< zMQI0fQO(#KB3;grV2|hi-_b*E^QSr;>{bZZOk&5=AfZn0Wh@@wqeSQ+McYX5gk|yD z3OE~!w&!Wz;w&o>vLA;(v!4TK=o{N#;f-x=UJepx*-7yDtvFbV5CS{6P8^$SC(`Ig ziL|^mPaHRMe=cMyg|u4`%uy(BW#BB>@|Tq3MzWOa0KT{$ z)Ms<{()kPcal|r%{rUTP%kTH*B2;fF-V1r-EQPr_dLVw=Sg4=jkWEji$%t5nh)b?$ zyxkRzeU$;>e~mGglI1C_QMr0On}Lc1 zM^&cmOhZU4cT5oWu9<5mLZk3g@?&@pUj2tOgjBk{ZpFdAG=zk5#{?oxei||@`$TVN 
zAs>v|eS+8F^toer)iFrU>+5v-DL?+U7Q-6O1!ifS24AP)$gJFw;DPZaE02%H;Wbce zPG@mRD9tO5+x2xWnikf*7=$dZnEeSIg!C7yPb}}upB8Sz;d9~J4OWO>OJ6)eab$UC z4@ITu>&(-zr_DN*ue|hh_Ja5_?FkdAw}kiP>KZTby^xm2^2F@9&{ugZpGbjNak#B2 zpEP)5d78~aEMNZvvc)MM_N%HZDo@@@_v$yUsC+K`)$NMP`CCm$c@8Y&mOB;^F%1@- zPbhcTeRNB-tW#Cda`>xU;94Q+QeHS(HBmds?L;8flnJAAmB@$egqV-a`9o-tH*TRF z_t4*qs6-$%%Fn^8miQK4sPzfE!0cn%JK zg%I+JwyVF&g?Y)J7H+Dt`h+ayX}w~MR=f-*2(yRL zo^HrJR<{y|ra?DZV09fVh98ePSi+n(9Qtb-D;hjFe&vZ#IQ*Igfj4Chgqy!%(t_?f zv=WDRn&GE&0bFqx&|i_oqdmv!IUJgvhR}xua4hIJQw4>Qhyy-aC1$P@?;x#8JnJGF>q)m`u zqu zi)d}?b0mZuEm|Y5>T_K9hzUZK!a}P4cprpPr_@I|06Lw@+~9ZPJuCZj0=Iis_K&W* zk%*NZu)=*?v!G+}c@t5O$2#OniK&AML!gb-m&jvU!WlLx%AcVD<~^%C%~TD_U*S2{ zAwTRgB`!Cr+)3L&;!Ffe5pQr3_ih}%3F@>A>R_Y>;W~Y=Qw(he-c0m8o(sUxuT1MP0iwO}QVJo?IX$BD=VTP+Mi=9M#WS!ytdhLB45g&P8 zd;bg(A9u`=l1{uyH7eAGjJXM)+2+H(*6M%fCXLGtI z*K-pMcFT*%fTa2fXz!h9%;;@^mi)KS9(s5%v_7VK^T-cAnT8T)2T2A2n=~N-X7~C3 z1(BluuW;nKj@L(A#N;&nh)?==7d#BzG<|i#el&UZi#*-r6&yk#j;O2F!`W^tz8i#3H$3PN+>MpcxXw#DbY9X?$aF)x zk*2$)@=F%K#zta_jTAK!(Uy4!#6?WI0|F@GU<|5c)B#bF;K$rNBJ3sZuK%(daSNkY z$17WKf$6H{qE5lZvL(n2EU6?F*oN#8nIlqBE49)O=O(~>knfVtDv_QdMTkN=DL^y%c+ zV^FQg*4K1(isomT-%wHrMTACWK9%(AouEVd`=QiL^@E#3a-j>p#$QflV{bYKP+s_; z2`PW=PvKty-&+yj+aO~%SZ-gK1FG6CY`5I{)=1QaGc9NTLWLH~j!#EZ=5_fUT5R8F z7q;7OhcfdZqOkPVNVEV?Y+T~%B6vcS2$s`+$wl%43e843x8QLGieSO=@u(a_Q0FdO z9%Ge24~!66NzxnR?-LKU~NwCLMH41OaTu^=d&kL&3ab#B7%Hs?9YCEKJUnnpo> zb(10DQgYdC?Cw|VEr*YQ0G)v9#2OP)mZ+z&x0o<^e@d3T>oohsKd+_&?ycK%k>GVq zpo=-&biV}@!jw@EY%{j!B63SBi55=R z)^a9L z&PtoBu0_ZW{OxceWdDKIvRu=g3&3jO#5-Rbsf81_Xa5X;m$g#JYMGxp1Tk;H_5)pb zUTYUFZ>Qs$%Er-wSAWVO`q%y?!@*Umq?m}wT3xujS+pE%qW!gX!3&CqOum$e7TE&l zv|`CYu03%(5hw2o4a1qMNftsxtt0OC<#AYQ-*8~hvYkrsD@LSTf^=cK$I}HqXhgJ) z?PLZAP9kRK;~YIW)wub)bMQ}QBJaU z|Mkj9q-6D}s(eOJGeKkfQM5!F==9!i+5G%!IuhHAA7K%{0it0mUh^E#@o`h=>+qiC zfn->FyX~P@U4OwClW^OL^T0_$Kg;w8 z``F$JwQ=TEo-RDEwhL!oLF$Om8vlZFd3s#^y;{9`dIkceCgHZ_XKQb3DOe3JGq4!C zM9YdFauJI~E3_~~$t1ltS)(i(ZycIFg@)3HM4nZRz>^B|C0vUh(1q^%ZTzlXls)rIE)Ik^kkDeIslsN@Kc 
z&iNFy@&jGie$n+WjYQafq6^R4>&5nH7cQ^$VtZp3o?GX|_RqU;`96A96+_3p@KEA-{uK3*qDE9W< zhGGya@7W8qZMu+c(`SpI;0=Qc1}-pOl>Uvicio_3e!aa#tUW(#4-YEnxWIU$I-Rw5 z+Mr^66Sem8SbKeiQ1IisQVV|03R?d2BApw%k5(v)!CxLoU%}W@%3rX)8U{7-h~8Hj z>+2EL*NcM+F4X!eguZCe1+3`BgNj}{(HLa#&#deEdkn=WZzkvHU01O7a#(v$E-@4w z0sQ~@T0z1J{%{Ty++JxYm=5!=7c6B3KR$;;vie&N$%h>V${b3>&09vQKVHeQP1iI2 z`$v-AnOt^dhm@6ljJOuQFcK+S!PGa&sy8Nga#@>l{@|}VEhuYJ;wPXX%SSRWeM0~t z&%fVtOKL5fW=+5 z+|}uL+N;ZO;@N{IhYX}LB7Rw8B*cYBrD z)fWGW)+9hz^*;qh94@T@0mAmC&95<#b6xQ zhSyp+#IBt~ZGqcJ!^(;*#*PqiB@9B)g4Pmr68j91is-zTO-?RWvj8&!Gw^;UWFtP0K=UHB>gY)wN@a=ID@sZN31ancF z+-LMW_BslI{iMw-9Yq?dDeMfwMrEqXoD7PQw)+f0 z-}+URWpP`Z_-j=^WUrf%c;OkkI*}~1zc3K9-%*^b0sXgJdans7V&AFl_wefVO#V0I zFOf&QrD0CA{kb+5Y$=NxNjSO!Q^^iQVV0&OMcpS*+CW$2lMAz0wrLY!ni8&g&jJTv z`F(9}LK;ibzj@6I@We*^M6?Dp40`+Wx@9J$T&4aR{KE$MM**K@_c;iiL%jN$G{#rq zOk4HATqN4Qe?J$6oABC+u)Az)59Y!?Sc}pU%vCET+D|_NoX_qqJQuj1_GdKCd-Z!9 z6A-r%G5hQHMk8_Djo0O%@E#nR&p|4294ystcj$J=f=mDG)1dd;wchh#Q;i!*d%vZL z;0@a-*&irW?gyb#$`}P9EDypega}klB3v|KdoIEe+YeiF=q819*P;!x*26XM>yvA9 zP`D|O)rB)HcU_lburR0l|3EIH^J;-B!b?#;@ZgX{Ps~0CN{<7*MNvBm?^!W|Hd1(9 z4vKHl`FrEep4vh0-@b|p-1$bif5-YYhEBE%PDz>-Af1>~W*u_{{rWUizYfkXBw&VMl zq?}L0?7jOKkDEutafj7x7A;@ibp|joBUc#qJ%p529Y!X$;7lm?b!DnZy31i?vW^!( zv77#ECI3(Z9M*YR=R9(Q6OmTj5z=V{qQ#qg%da=OrU-{1xXUG^YIFq=jxml{tp(||oV++j{heso4M`H@{ueiR?3TKay;rsv$FyTDv*q-a^RnH~7w zm=JPBL(u@1AHNeZA{^{dtF{x#vbj&o(-$9+Mr+SW)Sjf(?o4B~acH~7H~^m)ekTnf z&??tho&dIqc{)^$7H#)!%|#F`;Xb_#Tac2?6&cO6iOX4c;RuswiO!P%6X;AsNPc}R z&vX=g8u-wy^t?>ph z;g%mxW+S=g73~CK_YoGS1~&cM*9XY^s;$C(Ct|#wMki> zk!H|k%WJl$A^mDj9*$84Cci_8+dy?VB)Eh*&6O8+ssHVTEmH!=@G`2 zhc}|c>Bb;Fk98m7mdX2i6<%Bx1O)VKX7DQUL8OG$HJxGSpsa1xs0s?H#yZQY*#kGVIG zj-tu}#$R`*pjnz~mL`!!f>UZDsELBo3eu4VU!Z{i841W}l*NqOAn70~U_y68%Ayb) z9C01T(Q!jX(SV4OCYTMCBseTV*+;5rmjKF6>UZ9~uevLpz|8lZ?~fnnkm{aG?4&WfuLOgj2IT#2M8j-sj>Pv=XE_1U=aYG{7DSp=5r>1 z_o2MFuF(YW59c_*BNa#QIKY+X0ARHd0q%zY%i9s=>&!5}1X-s^p3ItPlL7d~Gl_uv z&l!L}aPI#oxEtRD4UgDJWWm`>Jk2Lt7pz1P+-vz}=xSw{=7G};ujHO~ 
z5qGs1x&ZiE`1BOSrx!zAAkTG>XCUSYoUrhXz%a(wv0F9}%BzlN*ChjyZed&em37Gm zxyZhYWkS)%uoFwPbPD#d<+LloDSlNkJIw^39;dHmIAVPhAwW_F z5wItwu^yE+VKBVEtN{!tLW$BAzo1rdi=vLl9_!4}>P9D@KJ-CSuU5R3BUT@Ips_l%819>;5?*NjT_c2+ZI zRUYZoY^-4mLp{SEvXRIW7qJ&mgAzEe29?E(P9WE<20DJV1l+WWPw3SvYgn%;g*(?I z69QO^s*dGr5PO%d;R|+4#~7+k!DSSDGyci)G29uS-2uly;G~Oa%U+A0eDore36+;h z^DKk}39TxL&6lwDBQ{^3p(5l)X_Wg5(ICiav2ZRmpzmIgpK|29O(Rt7#7*_UAaZOv3OhOG}=zTjRG#LrPKR-ad)uES9bBgY1jkcLqEL|B*fZ zTuy3Vyp>w2sp{~4iLCe%hG*XTj=dbCOESbV;g)aV>T>`qp$;Ah`TA6fWkP?Jp!V$r zWJZq)stD~_qD>&shH&N!#zF~|Cm1NM?d&nYwS_&z59=sKIO?0r7OAc8NPLWgB|CT> z^;s& zOI6rrBrPk@b#3?O@Ua(Wh0t*BO%(~7edOeuX)O*SvH~wQ$ED2ifh`TFyphO zD9ot&noDEhmb?lTSpnwBHDcGLpvS5W?83H+@Gyg5fB<74hw1vVKR@)L@WsuGk<0In zJf6yRVGZskh43iO#*b7Rt8$8qt-ldpC&yK6V}1hBd!PD=lmY90kP!?dmxol)8$EH$ui*e{72P4{vPq_#a5wq{&$-;#d5WRB z%l6&0p2)?uDmJ301@{pf&G_xZLp>n>)=7x~b?<2*+Bk5|E{MlmFaf(_69CG}pm44@ z&NR6Io9)8a zbRLL=;Q*DrQjYdUCm6@@1k*SIVZOD8g=pu>zks9-jGnGMHo#{6=%mQrD!Yva>;!$v*V>^XGwPp(Khoea2KqX z)4*v#RF^;=Ll1oE78DFJAQ=gtra3yoAYYf0I4y%%m4E4IA!x51IBoMc0M89`v!eqP zJ25`7*oAHuq75-a!bT)g$q1Y2!oHtfah-6*VgCh}KB5qp1zuP35Tbq0LgttGUDMzb z@QU%Hv^%l=QF)2iXbZZT(1f8Iu}Xy8h2suT&F}IeJ zc$0Xdt0nF(*~r}mbKgHT@t*K2p8GWez5RwcKN^<^#Yfw%`T#WvGNE>Ch z5UswWg=ibuuXT1P{L>pfu(JElTg;R;k{SOqN5#p{k}ZrHtAH6L=T;KwaSEvB=?^)Grn02_Pr`>+a~wpQ&pzz_9T@tV zZzYpR?(Z>%{-)m{pT5l0GB|xmZYD(Rr0vhQk;r?$T}od60cowjDij}Z@#s~G<01I% zlhQI9qBg2;rtZxP=1`?ieb7dgwRj13vgk+dEn%AEj4AX9nO`~he%$#C4AJtVt~j6| z7rA>T^x(0hE+C(U2ad+YPjNJ!5?ySvKkvwTHCS(Df3A9^)5t7ns04zV@2hd!srZa% z2TLwxLI$l`#Hh&)!O8|KzfF$9lR;%`+~nkTJ5_}{Bb;EmHXjD;+X_DA27a|u z)iq#-gZl`q@=7UARQ*c@)jv>e)>2dA8#aE2sV;|yGEK?>OPCw2=3!DE&Ct|6kZK@XNDCqdv(4TIjrSWkr5B9&64H z+=;9ATgc&CIZyApCh@~RjsS>Oye2gMH&2~_D+YNl-(mxXt@i!x0gq>Ehg>Afw%yO;#DI1A&M}gt= zKRJl@A^Y&zd0m&i(jYlk8cG$o(FOKeTVX}*5+xCxt?z`?5PEGpif3J?yG+^OMq@=M z2$VJma(#j*f7jH+Tn}Qdp^$4CJ00FhvJh=0HlF*qDL?3>Z#~U$shlefk^Rz8?I#Q5 z+h)EdaRL2{^`d^!P1yR>R;~Zs(bT$=wLXBg-VyMj|KAUPS--`!@6$&in)Tr)NfkXzszkCliRb@i%^zkVa);*3{vSu_z2)5c$jBm?0BW%I}FUyRfk<5ky-Lr 
zwu?*Kw~7W?ikH_xLe-MlhLVC<=5QQ#ff9t*Vj?*Kx=A+@)3w*{vsg2|c~3hdfT;$+ z%R$xVABQ*)yTCI0z#$j0P(^s6k@e2;WTT73&b-6=wV>$Hjw)+W(c`3l zG*#w0*(o@TDr-@}1Kth4jKOx${Mu5?FNXF2hI*;E8^5XAN(hNQpLsG`|0M_qYqhI3 z0KmT<1i`?(Ex=G8z_+RRcH`?Hrpn!dQqh+|@@MkpqfPwMN}|6~Rj5D6505odbv@FZ zox5dhw;ydbIVu8LO1y!f@;6n5mk#nXdD%f17qQGf=pwA1(^pwYWK^@RR|@kFx`-t* zfYHDouOlR~8h3k@*;g#MLJ9uP7xFlB()BFq&qh+Zc}s(#)i@(5Vs<$)geBbwN!t0z zHWF!n;bQvgK}+lP|M<%&DH;n0gV6CRt^|MYow^6=k6&PXYnvqg?|`Hr~e4{yT`?@ z!pxiCc7Cyuv|uvcNj|*WMYP#I62EkeYAYTlq%uh&+K@*GxX-Z%%Zq-|UVN7j&GjB> zsf5yFH=lInk3c)J_h5NZFRBV>cJUOIL>f!JR}@xaG9*x)L}at@TShGy*bZD zqKCEH{%9l7)t4BLKUrFhhvHh!s$rzfU|hj@jeJ7?J_*Jrr_n{CHY!)sP`MvnWUBL= ziYt|Y*HF}vu`;ap(eE?MwcZSp?TW+p66&gai%Z(x7?akWF_zN)T-e;P_l!x5#E4F& zaw82j_-~@h)HJHjN%Jbgr!OXxks*eVs%{73GulYyvs6BnEthAOYyW!8MtpKPm3Kxr zE3VQP0j}$4RWSUvK_4T>z95aN4vnoxWn~xz)*Im&Rl#SPHtjL=AhC_ALcRgm&AGU+ zaTcjM?6%!5#&})-n2kiPfkVcs*G0k%ZGAq;M!?N(o8y0wTwFf^V;RO|^;#IvMekK` zo)(Rz3QwoX3OLQIK@zl3^=UlMeC#}P4g0~xh`k3|L>|$W3^bo*>G`bQ6W_Z~2au|+ zbKe7{+EEYd)$VG4FKW2-jtSaLF1;JR=c96LlCDR;i?sw8zwbaoqARtsi8d0kuyx*Y zeLvFD)~*E=s;qDVo19vBEmb;DceUA>4ON9dn%zsKvc`&kQ+I188 zEmCYhwA7dNF(f_#Zc8($yo07UvY$?>%$L%l?WnxdCvTRG zKDG0++4To}>D#FL+roJ?P?g~+9eN{G7wSHDRgsgrw-w%1>AhUPjs_~5qid+#9DR?v zcP#jr<-dihcVv_%-H5ztdK06yPwk@N)5B^1CTiJ*s*&l(z{Z?0Ff^`jTDAka_aQ#s zhDV$0?IBCs{?mpP|7{f!13TH|)@$^$q5HC z9`{alhz$T$vlx9pqi`RMg}aHnMKgFwCylv2)D|=wrSC3wkjz{3Ss(40ttg)?&!r)A$MjE^-=!bZJ6!+8_Q6 zbFp!}jYJ;1sJ&aIwJm?|IcR(fQ{0QnJ2LfXXP;uD@=hu@`s4~4+T$NXl}RboU0Wzn zxiNZRu1M8McFaNLbJ04gOiHD46?Jbe>_X*T@VSD@r=xqs#e-R^kbM`Gw?_~037HQT zl^Z5CT$vJ6Mrm0*mOP#noWlL1xVqWmf?7vf) zkf!9Nxf|#D(24cLRIrG4Hx}iC6Eyc`e_v4P+FUe<&#uAh?Mosg`ccBi7X?D1FH^Ix z_I75L5n|7|qJU~ZJ4?>b9}HuoeU z+P%LS#T8E~*A}Fyuq?H&PC#W}o0a%3W0n1MKE`DwN!qLDA={=YiJ*OT-rVMqGlXbX z*5=Px?_9HozbjY)>8L?On=RhJZ(j#U(i(g=66t&4()u5lSrYB5`R->Eot;XRoD_%- zy@R0{fv4HoJP-GG=jQ1Ce#*o&B|FVsTj=7e!!SeMYa=9DlkoA3m5}HsMqz=5YNBh6 z)ia$hk)jE-D$Cy1%BHI0?eSm^t2nle?@8zY$Exw5`5GP%p7dTHkDmTy0lvqNs4Acn 
zqaPqR{OvWW3L~B~4IxA_tJGi!pBZB#k=o`<_&59~Y16+ukF1@+y7Wh?Oi5)J;vT9z znnK+>3I(ckh#rQ;N|i_LjHm2Em4zwPy>pI0I|C z1bXt-Xd8)mbPz0=&ilC+oqLEx)(ZmVx+@u}_hbq?E(NOejz;FTqsm?O=mEfIm4%%s z!-as%zGX=x?9nDF?}|oa-o3TZ1>NNQRlA9S^JpvjLtI=#+V(FTDi?z<665X%4qQMI z3?)M}xZqQ`fw;pBJQya}(dTq79?*7-tqw>csO#MIr!ApS2jlGV#Lgayn>?pg8E_=d zy$qWjiF2piE(2lI_ubR24eR zPLxP}{vTLCL24tB;s4dY_-hyI59J+7G5UkBj&oAkg8iCj=N-GlN#zt%zy5FgvZpYS z>iqRE^kw}WHUc5M!M=9tQZ5iHVYqO*Fm5R@h%eQ<@KRNHp&nFTKdyIytoL2yBTsBW z;ZiUQq&=*EX8F+#ROwSkm9=R2xfY3$zT@g$gqBq5x{c8AFWsnn@4{_V9(;4Xi$p7_ zBD`u;3f2Z}5qRvTix^gEJyq7O!b(x{-Ph7~rAO;nrDBrI|IE<&oaHuEUQnz{E!A%fW~+Vl-iW z$=1aCD@6!za}mX{X&WdTF8T#|)uH38z}a?1e~dDGyU=Lm(~-K#`E%A2EPD zrGBZKHYN)F&A-94*g-*K&TnSQLqekH`9Y>^;3{|w&xXbb3ywp!fkRB$z~YmYSG-aW z8P!5Y9)o<<9#cNB9yosjWmVlvlO*k#pCQ+`wOEc%KrE)F^59whsspf*$Jc^UFfUysh{bv)@^<<{0EcjBHdIEBxRmB3# zb$U{wglo_VLbSgiJnipEyd?P7erW$MBWxrx^-}(ihok?Dd2GGTX6tn}RRqsBE}~K^ zRb01y<06(isR8{q;ypj+qI-P^=P8?A%{%B}7*;*t*u zA?_+M@D}da>E#@CK+plj$xsId;P4`6l#{t9I*T2C9;zfYQe_RpNj{|*-QN|jvJk^G zaFCRp-sF>c2wt?U_ev@0+khp}9_JSa9;)VnN_$2u3Iqq2x+~^C$F{?48rU+gBC|ZR zAu?Onb#=7WeOg??FsOT*7*^kUin>t65vp|&D&r^B zv8kU8;04FR`R2t?KvfUg#!373pryU9vOBzi&D)jOIIU;cxT zNaF=v*Oad~BU3*vkty>00K2S%hxNMcc)~pXirEJd`nL|#@cwRGC#;W?Dt>7Qwd|no z6NRH`_&{f>2Cal75u^<*_0-)o_ZphM7gdw|hH8=#b|6K|?MF;vwl7)PXoE@He#Y?s zDgFw^!A|g*RIr6h?6wVqwUxgG^)Z}^<4+-!(rzaprM-PiOiFvBQRtT828Y0QKPu3K zsazZFhZ0McXVmRh!ad(ELLZ<$$Y1n5(#5K9xPn(VW|$#mP{I8WFj_kx72Xx0CJ3?D zuHtNo@EJ=~h1ZQ5-nmV05oB=KstQ94{_4Fp65ZY+v9JO}wZVIBB*ONV*6TJKiF|dD zcmw_L{4ZeRm(Rv8Un~<+s&TsL?X?)t`z1e^QAO*b5YNTO~;AOr-3a671}>G!)ff725MyoVT}&EHj&!+Aqt1@ zKxl(R2+_74hA&8^{Gt(}b@&e<+HY7xvYB)MM$>I=7;s5auv`p&zym{x#sxQ51S@&o zN}|7Nw`|3g>50CIS3=o;!YiStk+VPhx0qYaX_6C-_xtF&Zg}74wcQA{8RKUR-rmUP z&1Op8RA7rHk52hpkgT@Arwq@w3Y5jTctPh;jJhrXj4<83(tbjyrLjBFryI=341+D_|?ZWnWHZ_+{vHeG`uB zbbS;?oi2u>i>{@L%NgePsQ-X<%jhGe?-G`)Q4B1O>vQ?<(n1f}IDpVu@VgRZ+?s?m zMS4B8m4ZJu86PEQO1l%XFqx{Z7mAa4q{OOX+AVP=6_>mK_DcsU%WU?e&r)T+b0tZ; 
z{s%y!-Nu?QB`7c5`J;(bdi)UNdS!4TmzIIKB*--hb8Q%v$SHOF5p2=Ec_NuGZtkUh zglMDKpG<81*rY`6sbD)C6h0|co+e3}{5|9wYF0RgC^Ks?ASH69+wnj+H7F6vr~hMu z^8JH^Xw`cO(bi(E|L`VKl}8)kl$bu$N+P$NPbAn{%10k+rJL~6CQjx2VGz)2hmH5? z7?JI}O)i2;fI`hCpo?nbUpC>Fw~aDS4L09fYpH{6B=Y1X{cTLLw33giLY`6IGdJqG zB9uIi2I=+|9)%Q#dlT>jw{UhVR0~_C?-dL_l-~+H5x`^s8E&Kcc7jiXT^kG|w!mJWgln?uYo+En3Lo=jCC z1(u>WA=t<@>@5|pvCDpePeV)*5UIR_svDry_Rtw@Rz4mX0LW6f!Dzk%Z{8d}@y#1; zB(l7my?=plyA>c?8TAM%3!!px8EA4SD*!QC>S(BZ!Rs75 z8kI7b;l`p0-myJbI)hPt5oZ{pBtOmlj*4Hn!0%|^c3-$iW(`$czi)JrpcT|kg~&#n zZ4V9AEbJfmd^<&`deT7Fsw40ux3s0I@Zm;28jEjeamcE|n;T=B?e8@L?+&GRjl&(Io+6PBay+a`3E&9HKBcJinw?Ns*IyK$!auQRQXd0g5thd zR=NdUFonKij0F(w;)VmA#lXWhu9P(%#ZL0uHt zX~8pgA++L{@;T}__QG5_RL#kO4O$PgFkhN2PA+E)u!$|eCT-ZA>}-0NP0*`dZD2tH zldJ+Oz-=g%rQAG|Tai{JNkBAIaaDY3M5cuWh|87x0Dg+9yhezU)72|^w2|Q09R~(C zX*`QPR+o19^YLtz#aqB_m%@j&WD6T@HrceWhaqZe!I>BqrHtP`EXHRda*As%Y71|S z*B_|^nS<3oI|4V<6D|bKTExYR;A=ZEB*HKxG~ODR$vKHvwl!aK4r8zR+=O^lA3zdL zz3h{wQDwOype@kC{{}R-^4dh1sJ5517(DIyy^u4IYRU;NP`})Z2n7P@X-#;9t-Cex z2xH|HuQZJdW7qE?MBBcTl}8ZXz28&1*PBcwz`M=RmVoEC0DSy+P%7n)M9N|A z4+(%@iGX))1-S1AGvEvNK(>*s0KXCe&*y+wvJt;`2P=;tghwTU5U53c)f=rO^4PgH z^i5Q+q@n%FW9PbimX!*65ON;KS{d zhRWyOLY3QdXsFzu#XwUCxcXxaPA{k7{TPFv%g_v?Rkp1iopDn_-%7TuGJ>BN0>hZE zR5RS#&IherPQy;HTdagA_E3$uEQ9kp^ORg^P^dv%dX^C4t`*heygIO!f8i1Ig^mg< zykp?kjG}522$nAa16cM=Y5_x?&48iOmH@+YE5cA}HNoI|#3&5{o9P`uE^Hp;^sRB< z{D2!fa9-U6OESJl=3Q#NvCOxDc*7C$wtN-do zf|eH3arsZ)1sN}-m@e8KFdf)%DGJgCPsvj~Wab-AmwOkp)aB2?q zOk5d^&hH$^J(=}HWIo2%*FH}??_6NXNhIP z*AJmD6)~xmRHzt(iNV-RfWV~r^i&0-oy1_ZjnU7CY+?YUWa{7{OC5x&8^EtQN{{@K z$EyfUAG(NFas2$Di{)qkts~vlVqhjWK6FxfzLdg}qLLWFu05*fo*QURofb#6rkeg*wMF9^{V}LPlnmLqZBmU_aVJtV=^htwtb* ztc<46k~4Mf2@#h_@xw@zL$nhJ~ag+ z+J^QNIcZ5HTOZ;`h>#4&x?f6*e$J9oBM&v}I;#hx zv4gS+1K5z~$(<#w`B&o(gw>PT5Sgmu1mwpGBAl#HLT6Y*;&$_xsHvZ7qwU8 zt1=^DRf5UYSapIvR?cx|mz=%8rXa9^5HeozRy!llH0%1v%<{+*qu#5p|KIAp$0$&l zL5Q*WdJ_Xfn##qBSa=`CUtoiCh(uNyKc2xKNrj!MIyhA7Ad#H&x}JG5l6_v+=_Vcj 
zJ(YPfGW@)*YZY&Cu4&~k6RH7u-@HwLYO=1dk;uyl4k8oe=Lao`_oHH&Fs%Ub&AXOX z_y&~JC#29*jRx*vF7zZTg<2p4zsGbnYEXmPpJP5B+#5Sx~}`wJ9M8s!kJxinyt#h z8>uqwoD+6l<{rH8V-?N1j*vF%sBG4CZT*{kDO@|;v=o4We)tPm3O`1og=j`{H0C*@@}7%>~c{Wv&Fmv_+c(RP&Ua`@Dw&VRVXoqaQy#6Q+RNAuxr|qJ81S;7p2KEASJxD~xyb+(`lje<6dvqDA zbQ>FL@Ynd_mF%@xceoi<{qc(EeB@^cm-1YQYsCV z7u`T(5%QE=$s6m2;%A3Fhn>-Q06=8C&doMs_ttFZ<}iHjgn%s&y#-W#Vy5^$RQVaA zy^=&o^ck!Oo%kM)Y~eRt1vIc_c7^utxI*n!Y1*#RHKbEmd|FNON z{`q5-ha_iq$py9>iY8~)1Z#@Eg4y@Uoh8sT(%!k!Ann+(&c*<-%qQHB4Qm~z6RY+q zxzcQ(g783SCNZ$rLdba8`%_A`?&X)3(UqAs(dATmsvz5TD@{K|OLQE7r!0hg#(siU zKz8`3Ejw_~mZ?WO(m<8nC)baZH-oy*1hywax)}6W2+0bZwc$1ysork!ie;U$#m=(oQ-Ur?327k&|F_!eP+Jo)UF?M>74*~lm!Z_z!0?-uIstoH&S0!QzN|~OyapV#&JafATXrDJb%5Qn z<=W^5DxZlg^mR0!VaAXq0im$)Mv0|?)6NOxp*8^58h8)^uy=^4oZ(H99 zVk{=yFY{|uP*!H^@w4n!qt1|iDx-zK^=X{eoI#bL{4{+k!>25%W0N0*8=ZBhPqbLOxtEPZu7HcK zM1AHyGg;vO zKWqEJY;|a6ut5xtu@Dk$pCy*{$n>&z4Gz{x1vGHhE{1fDgQ-|{o8pslJPp4F&e%s@ zR_^UzY$e=JmeQ4m8vG-uGSx{-&H;u=&1?uZ_&b`(0K;tN_Wp!Ke-s1g zLCf2iLiO977!2d<%!X*SSa!J{Ziuq>PWC@>vcf4ow>i2@onVo-$@N+5oiG;r8JrDS z{p*^%_IC0aJX}ofeEkyjie>G*Yyh&-H9_r`JgRn8&iN=CEjrS*BF|u z_UI6_vYI-0gZ2In(YLjkufW#0`%B^`zy$nk(4M@a8?nbjX{te6HKW|F_If4d3>v+XnWzFM%ZMo%0^J@Cv1y%*;xPQGh?h3S|FBrW(>88q0cQ`5AkDuZH;Pq6fRy; zGaEAZM3ZP>vz@wY#lT%~?Rl0Y5i+L_Rc9Ad_YTI|ufbJ#d`^2{T%+HzNlRnA@_Axf z9BOAPn8x`es_j}2puTPf6&8seZhQS52}6M`&7Bk0-?zK~YJWB%D*fJEFPLn+6*kXz zb~zir^+rkKw#HrUPt%62Cq%2>K!~<>y=hFq;#0|$(lYme8ywArT^QM_si9#)w8z#H z5?!k`t%FRb&6z-$oy;;#ga&e@H1L%b+=Fx#*MD&{zTywJlJTm8MDGA=lv}6 z>rhh+!tU!@%vDS$`c_>3{VycW)fMFlxFU?xHRzf|dCzHfa@^Rmxi_8zv^mUaBavMu zr}V9aJ+I_KU=2|$6Q0OLxz8eEKAfH&it3>lfxXFY`7erzL4qW(Z3Q@BlJNmjUE7LM z{KOt&nWF=1kgJIrQ_i<=%oJ$RTesFNcZoh;1(J#p=9vQH#AQ7hllBUg6jqa0=u$| z%3LsyX-U(2W84zMEP_GT=b7mGX}^Ixp(G+7;?pKzF3sOCS-hbgX_54ejTHY4y5dGJ;k z6&|l2M`da%4Igq)WnuvhHOw6*SAweBP7Ee5ca~xtEJX{7X;P|o<7hoWAwDfqsOvf_ zyfd>rS{3K2w8;(|iF9j=ziF$#?@mZ&{Se-X#{2tR^#8WMPu}wXroV@>|M&jh-?{Dn z&K=ZJe=^!m7Xr&%L|Y68R?O4YTY1Fng(;=8jYP}ZTrXLcMEys_;hvZrr|j5q*a>8T 
zLWgCb%y?GiN@tID)b&buwZx8lstN})y6|hh^1n3eR26DeLlM!jv!lbL;^b%@UHjTf zLZY2?;T_dQv|>g!-<4$Q+K*8|dteK=@v>--UJ-ZW6``tduL0Pu4!W)ge~Y+CsN6ra zQkV=Ns)8pD$hiLo$Qxe~5?w@9p_5VT6`gcF@-Rs9hBYsB5v~7qHWG=l({Eiz8;QK$ z+{XG;P=7t2jc*2@LcxU<+vNEWELH;oj- z_AI?-WT-||j~ZbOo3g~PGqW71&d))GE@wvZS&LuvDxSP7cM}FGi9W03U2;ztwm#FfB`Kcwq=wvq+TJTiQ8XQ|RF*@>pvaH5>-@R?}ghmWCj zfBcA{bRViI>Y+IH9&wRSjlZMfcrAyq!%y|2=yfGKBLI#&b$)BiMl`S`zk=^^p~RmY zI|fE-e7{r0l@7ld^8L$)$SU2I$e;Xey+IJg;$MFXSFqYN0M>dL-V38wN zB%a?x)|(09shF*vBogag*At8=mWE*$x_%59cO@kf*xEWmv_E{v$1(B)sH;a-H}hDY zR2}@{k1n#LC#@=n0B_HNxUD)X9O*!nl{*WZgp5;r)Mf>(qm>@DL2FjYe%(UIXeDX$ z=&Gd6U@QI$IGD)F2o&RxS9zrZoeMW`*k!*YmF7zQyrB5h;{yQpX7jyxF9z(b>u~b> z8HD-NPax-&t-959ok=}?5av2(Ciq~KqGcxa^e&$eqNT4P#AyG>(8Q@7^9J&lOX1;Y zZzGXBla_i*{5n2CR#wH8g{dkWI%4HYmTd%=nk5*)rEo3A%oty3XCu*6#ULE7+- z7RL|Di(o3Xr>L9qq6UtTi!hXHebIx8V-5Hb&6P5gsd?_L{_$#7vpPj@52Ei5XP=5}ZD__ljks z?(>#=7l!cHiaoq^}*ieHQ z^tWdP>N6X%PaUVqojKX^78=%UK6RioD>#yjmH#tV{P19Q;6LrNb|A!EK+WnC8UGzFg&1e9|BvqojPw42DN@ zrFjgQ&IZqQMn9-rn=PM4v_3W_h#|)p^Vly__oJhER947-)vFc>gDsZG?)&vJRf$8UIwVY$?1K zpD8Z7ofU4;xXzbx8Qz$%Ox@T=GX&A4^#@8ZJz4@kS=nd>YZ1*s7G_ZQnZmoNGOmD% zA68R!VG6a0%CPy_$t0wU!Ff8WTY4zmo8?M&t$#k@;u!^(;-!W@ax?5K4A#LqxF*)Y zqNl;f4r6b$KXGGdyIAIvu99o(A}7PT82Zji$gEJg7}^NJ_m`oG$`Lay;DA?L_hf#S zd^){qmONh?ILqrU7nl6q23C>RcuM_J`Yd@wKG;Am8>q)!uoW zu9P8fUos(2T$d{ioE2&i7e5L_m0!BXbFxwwgFl$$KtV#qFHh1y^9V5jk=&GxO>2_a z5H?WDSt_r@H#T?d{2T_R%>ghb4DWA8RoBvQ8Brdz=5*(Zc8sVM%P@9KDL<~dvOQLS z3M}qMaV62dnvC5`36}elOO4Pn95X$$~FMFk&S&W>!jw&NyehRdqSY}^RVjI&rD5&TkdKa?w zIA($vpp0tOr27hy`q24G4?@hQ3cpD5w$m^dY&)d^Gb#ygPn!QT_7z%$<3UF}~FrG)p{ z%K77sFx7w_j*C+^AE)BLYV=2%zME}$C%BQ}4!~zLf-|vCZ)@eh{l3XykS=e<5ZF=L zVJokD`*AU^A^NK3ywl2tAu^5gHN0Tttd&GQX>L(&Qd;pz`AuRt zD^+H!i88?{S5R=%<&}m+(Jc7j*=BBe%rYy}ByRfbt`iqwbhFNq*7r=a>0Vck2_D_G zb2gyc&)r4SX#F92s)BK2MZ)J0$^twKgb&$3$&D=(575|-%5fNb+o`QgF)1@jBABKfO@7xU`s0WGb6K>aHuC10E069-FDs zP03D`YuI99OtibYFe_+XPpl;7CwyUds#2Z0!-a#XGHvr|8rtLk4_6dsb6(|@Jm>(! 
zaI_b+(vm|k6!Na<7uxm(e8azH*7b*L;Zl_KPdIHQk%0*=aVoz4`r`Y4?wFSHLTw@A zJ9xa86e?S^VIBF0P`ST5|C(ht$qs$IMD(Cmf67Xtf4@|F;^IARj&CaZI2D7hAa&=Y zTuB$#pORyzvXjcx4lerQ$tySeM^YurNr!>*J^2+><1IH^Cu zdA5hUJp1}_U5_3v&6PyhUq9Ok(dGe{*IA<6yP;T?D~Zv+Wi~{gg)-~8k5C47W<%r& zUDvfi^I;CYIcX&kyM7V*fw*{!r7eHQ;L)GM`w(L{8$I7+Yr*>zD&kP^izj~bNwWegdH;c=j<)mFIYNq=oyJ3jt)E4)* z8MlQLPKZ=$=zjU6JnF6$m#k*R8IET_rf+9^I$PdFPwk=MbHi!>MxWx7GP2b{U43!| zoHe;pnpiep>W5tpl`rhWwthapK=Df%z?Jyqy2vbD*F8a<&Hm4B)9lCm8Be~A&;GZM zvDt^2KLpXL`RKFZi4gPdk<7$o$zBzI2Gvwn9J7e-B?vfU8d1<@&roDp7T#p?kQLFi)F)+G4 z`Z_}-8HP=SQBRVj)t}Q1-{khlootbhe;oR*ov;#!bMpU_{@O3m-;GKCbAJz5-3j`18GJI?~X zntM>yHEh1i@O8o+kt;OCNG!fJ<_N_TGjicE8d%NFBtoKZz)vBGtHNr%S?Ai4&t1a5 zEXUXWCjm7pQ=K%lr)UCnBXBlp;V@`oQF~%A(cq5Sq6qig)D|wTxrOgWj~{kpJdL5c zyKIDLKa%*ll*&$^=*RJFTHY~h%)e~Q^DV1Q*ga0RQd$Br?sVq_iu{IUF!|@2tR(tQ z+xz=c^(RDYc-qV$fghI!73zQK67`FVHA`##Q=$BE9%t!s49^`ZFPaaSm{rav-VuGA zyB#^i$re~9(=%(L?WkN&MIDJJBxTNoxzp-N#2wR{`2(W+6nzCtzrZJgKw z_GFNx6>$0tY!EV{YY+v;4XRY(s(LFoF;h7)=wZK5x-#V#vt@4-X~;p3^J}zQe}PNE zq5M5YP1>~y10iQeBj-oIBH@?{{5(~Krg=Q&)XywM(B|O1ff*71!>$=+`RM3gTc+RssOtD;E-tZD zs<`^k1^+BX_bINjxh|sgeT@}tEOIKYW$?Zf-V3j%fzuu5_ojiMR}rG!vyu>PG}hX&L*g}RNh!fdf+_MesgL&h z3dk3-B(`4jqRAogPtQXxXRFr#`J$=y%I68u4*Zo6?Q5)csXeiE8@BG1oX~d(^R;i) z`WUPi7xyFB`m>S5)~__TPFd^IS?lAl)-J7DX9R}WNgH?6N+M^@UtIs^d6Iaas6Bm# z%V)T8pOeblY544L8a_IlS{iBnSw^1AQ=C-JaZ+W9p*vrA2jW(B+U^vLx~}Gd=fW6! 
zu07?lnH0%ZoHGdrhL|ovPd@^{uZS@IT)!lLzPL3#q6(+y@D;Bg0ldFz4xEogS+q)6 zJ}2hEb@v=Yy+hbL$Hm3cL(?GbQ=^J$5i>HQs<8b=(^#jmvA%3EfO}B0lE|MDJO9M))gW9+GG+c_xGc!aGjCzZ>>4*vMYAUnmh;mgegK7?4kc_stVWC z;#q4K%e+!XY5WE{Nz#7#ksY6Gj@GOj$1ohu;&Z0fG#l&Qb*&ui zwoRXy;wf>Bw>0(#?2U4QgsP<(PKLjhd*HEZ2z$7^2R$lJ}ju08%BuIC52fa&f&i8>NjgH&8=`pm96Q6)!C&!Hz!RX1y7Zdf&Ewor60u7y9NOW`v+kq zZ-APVtLGRt6e}Q6mRX|MwL}}lKsh}q1`3IRayeFJ=b((hT08*C3Oj(}S8^3Lz6KjFhsNvTjqi#zE@AQgyxalUuqFGURgJgWWVFhDq(HHs z)1laDY?a)Er2`gd?liV)zlpV)a#O6;)SHY}KfvN?yxeWrDtoOg$+(FS3ywD<+Xhp4 zGY#)gqE%iC4|L?U@K|0!i_>n@9<-zwzZr=)te6lozk;eOoK6P4*tLZH$SjZMV}~A9^<~TKSdQ7I9P_%H+h(WT{8dz4L7{EM z^<^6LmpT!dWrP}vMp3y0-m6M-oV+mHB7`*?l}qv&{8ED=cOSh*l`r zNOXhtkYHmwF0v7Qoc>9$5i0M?EY~KnU#jr(P05fxgJF*0Jo9CkIU|<&D$M*Bo;i$B zYw1nNMC*h}xjd-=!f0#1CmBVfNsvB>r+2`j7X~L2tqzke<4J4IL(-f(97OvR%Tu{p zdlz$^7>v2bV6MZgf@hKt-sm}Kv^du2+$0-`{Pmo!Yn=~RN#yFb{D(3A<*tkHM}Xw; z?1^?Xgu^Yr-^eEh4hr~M3X>9NoL20&lISyEU-G>Dyfr<+)&bTzgbpZ8>&{J?6xZ%6 z*<$h4>Qt|#ry6MgdK!+{sInZ_#Ht)POSTqHr0)91?xu2S4jw&}fA@-I#k9MjC6Lqz z9ZKJAK4O*)!XqX{%gxeT7&gY}h~6{d>XTU> zIShl?+b5TUPY6(oEzDCNZ+16}p`(Nl^-(=rE|*U?i60f_@c_|9mq8xg-R!UM8LFYi zkvUBo;zPlP5jtyhFJiU;VZ#a&c$~X$XY>rCt!~&CyRXBjbTH$nDA#j0x0wi*`^V#I z0P5OO&DmBSa#0)mL$e`s^hx>DU0vAKP{hllO0S5E^JHe2*@Q>eWy|FeN!M9-L42o8 z+-oI~_Y)=33FEt>&HIO(Xa|H(&Egnu0hsyHAb;hy?J!UbhpIt4!6Wh`cZ)Z!CU$lr3i-BRq&=#xF zcf~7t_yRrFh){c!Qprh3sHP}WEOWfquP69eeUiJ&qei?nyH3PUErWLght&R2-dBAr|x?vCOgSa@M7yD`NF%f7@f4u)MfvLVLwyCDEPi{E@iO zxlJJo$u(Wj-mV zl-|eH;5Oo5q|)#K2bF0-rDqnIRppsRNTug~_UmpG&3dS^Vm%<=X%c0uj|{J3S*|o8 z*x;W`b>aK#*wEz^P@C(8&lm^idX_zO;jiowKIEXP>$Q<6|L+BXKugZDolz)(rJK64 zEA9jXcqxS+N0E;}D!IbhqHjV9A<=zQ{IZ4y&WtF`r>g7qkv)jGuE%I9=M+%2b2gQ0 zY5f7J_WkpW9z-k~)X7sidzQG)Kgl{vo;^)0^L4V6()-1AzDY^5WO_FS#{YS+l|!GiTIo^5XF-cLyOuc78{9HX$y8)NhIYW=VgkxxPm0w=fIJ~ zF~F}oj3bI~u2vxyRg$o^{1YoZGpv-yv&B_i>A8>XRSbVw zuu4;D_@D!Pd^wWfiy``N-f7yGreKcR$h{U4NllEGDK1{u3ja|<%Y#z6Hkw4$F&0{K z1`a88sTDLP`D>sM{MT)z;R8uj_1js$?Qm~ZOEFT{Anx&LayO3Y#44^q^|kRX zLc~jVy|}1}qu5lc+Eh 
z)hnrcM`0hTe8vbap918&Pwjkpw)@PSA0iKOe-$}YxeUh9vYpD8QN8Y@;>-yYOs(guEUyNPP z*0bGb#K2+;A=&b@>eDULDNCCUX-U-yMHQMlTJ?3%Aa+?-FIq6CMS;`ot7?6yoC{e9WA}(eX($ItG`ay&mOBMO_n3KJ?j8I1TFv zZ?}@j!1I@o=fZtcqOL%YP{D3Dh?AHB_K77XnVu;yuQql2Mem!$#iOjP?@zr_J`I$& zr-A*aQE8XeH;fP`5d-b5;OoiBg_1Eg0O=!624dtd2UphtT;_?9rFlUsRbRw?*GJL@ z-eKBzo;EfZ)W9}}O#e@}8TY1`M+ZC|+hX$8XnQIrZKHaaDzj6mWiNFfD@>uv$H?7n zz#VmLPGjT`jC*<-s2!TeXc($I4OEDY@iX|-AP8{BSYl_WCRxEGvcO`x_cg5khX6)| z@WE`Eeh=57AZ`_YzY-b7G|e^-#B`tZO%$(RQ$kFa2EK)m?Txy`)oUv;s8g2hO3YSL zY03s(B)%^uL`$BBz_#%E8Jk+RKY!n2Of6&hB3@v-!jY4VG?{WIEa&5ORuXyTlJULg zj%yVw#QXD^2ORq5f}fpO?kkOO3zFSaCAWn zVE-S#<*Wb?lpVKGPTx&sJ5_E+{dF6si-oaLa(ohCh+3_HD~NfXq`GDk|F0G8u#iYj z>+^hs(271|6M^ThMGU=ZBSc(B^JmFEspl+qyj8*qq|gD&alamQ&{Lb) zkti;n52rrLWRx|qN!2HG>fR>?HbK&~9j8I&L9MKpV32MTL)SxEVG5NWO`*!8snk}O zO65mWsZuzP%8$+y%O0I6mOVMM!zoYc<1-<=c=~awtig8HBQiKH26BOj0H^l35IcS+ z=P0sWNpQwcS0B z%2VfwWm9L0WeaC^*ykyIVkT8yzz%mkmwI6i1FI( zap<0DPw+~ThJR#*+S%i?RoLF}d;(&WNHG5M@%Yb2nsr?}O>8l3D_G26?845qrx}+R zNxGox#b>NyXsU(pR<==Z;95aBk;l&Ky7q?&fHnKz!vzl*u%JbeQ@IEzL&?cQp9+jt zmZwQ6TJ%xC^_9ThR<+LqNKsLvQM29cYCy2q(BuSe3IAohwWy%J|#H_ry z&MWmaY(x${2034=NHpMh@d@)mvk|jBye`pQOW_k{bokt3glKCD3DI7{{(N9IApyr$ z4fk_8YJRdQ30#P+Gm?h1N_zQzq|a)tB=XwXR`|7xayU(Aj8HFrX`(bJSfc zF5ZdU+&oy6?s_q}nSEfyEt?ZdBUJ`JBPJ7C(hPU8i(i6$G3`d3sta{`YA5Y~j`rV4 z-TUS~O4IkDsN*b6-^L5f6=+EJk3e@9ilg)8$poA~I;`xMTn|`K&Y2hYzkB8?6Q$=d zY5bV=U@rbi-U*70ndQ+G^bWsq&V?~$&)Bp)H18JJ(5Oon;1y5F8Z^9|g0GkUIlHh@W93EKXfQz6efTVw) zdR0BsGZWa|?~gy2bXRrNtEyM8UcD1L&vw}kTLzc%5xuw2Bg?IPRor_o|7uz%<`>Mx!Fg^_hO4$u zbGsr4t7-cG)q!iB$mnMu}Z$Cgl(>|(f}L7>lK<#c2YhODQt3}3Y_ zxx|=U&X63d6;OCx+d1X;za{01jmaz79ThwDO2RM6lR0~GOHG>&vff*9On_S|PsW-{ z;8NmPDr76>pF-u!h`gl6PB2q;XsbB^iJj~%H<_t` z7<@7SIkcAXP>`a~S3$?Xll1zDHu0)O(W-l=K-K%q6b_CvQOkSrcWTLt<{SV8TJ@5#k+z{LEe89zIf;3#F_ijW6WF~tYrr_9bbKDstCc~ z{^2p$5(A3ujDFv|HSq#6$Ab|0^ihPu-RYsZdz*m5Kc7Fpp)}XqF1;x-+$8AD$y|=K2Iauiy+05^zOiN{F zbxP$NQt=23?NW1EnJekg-kORGQDv%3>L)|BpFU1phl;5vCc7v7*_?iwRBhB$^NHM< 
zRl9AosTyP!=|9z&dRpy!j)h6Bb)Je$0>w|KAVlzE2$8GVV5;j9?YJKq3n%8)TLlE{ z&3V`74|I<8VKM%2px#>q6BuarUW?Tdu-os)4pX%YW%F2&*(|IKwSqqm3%hFMOdMj3 zgxNX*r7E#A4ae^j0eG)DU|&xPe2*#SDc$uUZ~4&+y;rtM4B%FX0RH zRnJEeBI%Q$r`ffM(=_bQFiq)O1QhxOy3d`^;qX4EWfL{8Jb65ZJA`AArF0} zPeO?N#Ckg1WSSwc#_b6z3i4A>K;a*Z8DcFkcL*@bvPL?B+Y00w_x?)va-iOGF;>g0 zSm{h3;jd9vw!7J8-WuJm-RTaGgtwjpKP90({Vk^4yP8$EDhpv}qvU(o@C%Uqo4DRK zK5h()Cxz8w2bYAW%4E)ZcJ5(zo@w-+Dsa6>sazsek4~!^BX`BxhwQBMaBqP5n+=)1 z$bwFnJvRZmSTD*R>Y zZnzvw0RJ^RR=R*#!eFOsK5NrWichzk&ys{|qjlv)`iJkd#hVdJdTpiD@xW=wJ6dLm zSLw1AJ^w!~B6Ev9My;gNzDM#Yz>4&k^fw4hI-33{wYYa9gq3yfnR**mQ)_S_=;;K~ zW#?B@I~D}PcfnUIeyj7VsmFMDE${c?>zTj6gGWu62+^1SdM{vNa2WiI+AN^(=k4cT z|FvCxKgXA4uW)>E$M9tp$CsS{h%a}nHQ`H+31943Tf$JJl-X5~hc_9frs87G`q`!G zQn|#8A|8e!zj%)_lsMu24kF`WwR0RUZ20v5xNYiR z6s0QwDQ;Tv-yy~1b0Wpv3@NVt)r1rf!C(%Py1z37eq5Ub6#jRjv@!`l%7i3&oF*;~ zax3KbH-InBaUG38oXJvR$uac#R#LM|zRPi-B;aU{&lhf^;$^F4A$uf9K|GmKU$ZgS-7WeTy%hb)#`fW zJ*2Y%gB}RvM!uSS>oV&0fp7`+6ymhSAGpU zvc@G_Eu`-mJ*so@jAO@tr$#XaC)&`!v| zEWT~DF=>5U`=)D%!hh>|^MkKQhF=p)jFz8R3(&P1X_zm*(zJhaux9J^Dc?Uotm722 zrYKF|3xw@a#Q`Hx$6I1nW#*Wj>ytYJwGNb%Ll`53ZdfL7Vi-E-H(F>((1r8`i@=8e3qU)BIT{Wo%WMr{Om zE@AN8x)I>%yD<))9t@s^MjIp6w&~$Vb!d3|rbM1uDuqejQ!SwIYi;fM2Y=NLzb4i> zv$*5FFLWagg*z~Hdxw~=Ynd3S_(%fExaW)6idDPDwHfT+J!pXtxum&7o z;F+G{dtk+^%t=#AZ^D(QSZn_1NgpV&AVls1ElR44Ra<{2a~Sns2_JXC>KOWQ=3ZQR zntl8=NW;MBU@24Xd4ToR(!b^%Oe<#P{?!&_xLDY}E~HRmE7oi~>7Vl~SQ9@f?jU8EM&M^@n8(J}e%0p*0-YLECCccR+vH@Z~BI}2e&T}FS; ztT8Mu6Z?bgn_trYx|oJNtz$twM0CKtPZ%@dV@|8?G<*1GIH4o*-mq5Soyjj82TKK;JqT>hkA z{9&Sm;Ks>~pci~BpvZ=E(%V%i$=_ZRCtky6TmKNY!HjE|cT``j+DhT?daoE6i zYWgb+CL-PjUy4^<1GJ!!JvrIaCG6?aYqIE$(QFr9W8iMzBK9B)$h$gp==r-&-W0~Fo`+`N0C0G;h+G83Rgw+~+8w{)i9aw>$m@jWyNxT03 z*p7XtEZ!Wf;(V;R>KqA3jWyA?B~hBNdj$0K(gy;HEI-%&x%=Gx37!tvcPiz)y`Bms z_Ie-dwHNL6xpvm;B4%%AieH6Qo8wA;GCadg(?E+fqaawYFLE(f2ZXR5#FhhCX~Cf* zX;{m>7ApsEDAEBJoW#z?^8HvHc(iX8@^`>$xjYeD&Ona3@6z0stt?wzhvI@gw`#dT z_SCt!U=LpRZ1-+=Zo2r94OrP-+;Rwqe(HiJHDCOhfJP{x;@*wLm96wa^4$#i=iG9U 
zo9=_*YHCePU-J&r*VtK1e&-4A5z0;!JDa@UFm=~g-p{dC;_wS@wIsjTcREj+_iug! z#u(BJ8WBtR!3!7Cm@*=toZ&WG4u8;_AH5HQhG+&)1yC+ebW^U%O))b(RyXd=JS-`W zM!1ryS%wPZUW1JU&P;{7GU^JiVTa#>)slP+APPq>}6o+C9bxd>skuZ(@$tXt`mCGII!Ms^mU z`1BVE^y0y!v+?LJ*^H!r_ja4EivRc}8|lII@4Wr?+YmN36uxc}J(D{0u2aOtzht8t zQTrtu`1+5GFN@zbpK|1k0OJomuhIaR2`{R}7)+XJ*RI&beHd0)2CRT0L`B^Azr0C3nn^qq#39`Z@ zsHJiymfml`p>WSf>r<57y&G`oTM?^ndmc`)g5b$*&&MhBOvdowz$x@91P6`1kl8=u z99-+NGiQccmp$M3!(seUXqW&|a&1-i08&1z>t>s&C2tEToO@3Hcg-)`VcbLP(%dQw zgv_1{8~W z0buz3GPWS3+PeNb8!2vi5MaanR=z+G4{(q#By8a5+Jr+t^?bBBrFRn!9Tu@#YRv<; zAo{D8S{;zjk^*7bOJ6#$CLa5lNa2cGb35fPv0(Tt0JOg0%%Vqo-^Jj2R(Re-j z{p)WD5C9iuxN;kv_^ssqe(e6YT0juoB^f`TMM?G;npkE4b^BX%Pj>s+z*i8hrpshr zi+Kt%_0YLzAfL}bK7A-;KI?4wg;*1RKE`_rtZp-huuIWSZWPZOigTvr?3&^F^XfLU3~>16lo-@-x5${E~lTWZT-<~N~C9Z0KZhYh*`0- zY1RXAv7FV9^C+%tr06z8IUYLF#Z^!hwwbYJS`4an1 z>7I`GMci^KBI1@k12=Jb7MJ<(Op?|u3ecr(6lQHv1ZFS%6Dbj8-|ky z?8>kBZUjU%hpfgL_y1^jSN{5&X;&5*SpS#bjBA<>phgSsiWaB~*yb@Pq$WNU;YeqH zlS0O4t8yEiJeFTGI%tR_#7Aw94mJuJ`I$Qj<%dOG*U622V82a#wZ%Tfjb=PazL<;D0V0ZL*I2NIHR-_90)}^7MxVtLjSfG9Mp1e zC{}{X9<1zc2fYA66+loN;7LW5 z7-LbLZ@d$#RV_|5qnN;}L9#Pqy#uN}kW}qPUd>_9j6BBEUC!F=m-N8}cOpcJhZ{on z_+2GZuIp*&@prmgH1Xg(BMcd1qu_$qJei><=^ra=tYGYwyHTpdfixCR1ZPRsf(W?RqzYvw;B;ecSmL&P5 zChqx>kAKn11bnlb@Qw6j<9T<52?DUF#2irOBYT?^{Cr9H-mr!0?Yry0EAXRio|JaP*{ZSC;`RYrq1zH+|#Y`Ph`pRhb}K3i``h+KV(c?O;zC?K+u zeUM>BD6qMzr5^}%oHK7mh*;@hPu|?_V2}R5VH>tGj)VCA6#;?e;Eo;VI6u`V?YOe- zz-e8V{C`J`otm725bk{(4_?*{A{MmH<+6Q!GYG#=;&-r`HRrnw zgey-k&PK=!S-+uL~DR`SVB6nybMT8V<2>8AVl7RI5{Z-_wO^uTnyY+i0VY?>Dt z=+PiJU#xU&SGN~Wa$=&cN__X?ECa`EtKo0AY>y_9Yna&=tGSI>y%*!aK2L-N4^nbv z@_Ot(z=Nn;BRfo&0sqSaip)vu|B*y_kl6QPxaeMaaTXVdIUA-9hI_kOgmEC~>BSBl zIwyyC)I;_plv_;ECrl#PNtePb2o!<}e#m1hPO;%m;3hGI{={OF=HV3E1+1haE8$Mv zRm)N-Utl{7_dMHP`cqp$U-I&mto-Y8I&HRQ`cqr6GFHBtm4{gQ23EeBmlxDW24F4g z-AA(!)?R8rUFavD)O`ToT#dCJ&%$Q|w&PkqyoE;f5cZuKR$dcj`jQ!52j|1L9LG|~ zf-Q~M**L|_==OyAEraR_ts@23s2n@cgko(WK;)Tg_m9$w>E3 zB@t}xRx{Y|)7{kILWryx8UuUOP7~Pi`1^Ew@=T3hr>#el-$v7~7EwMrntpX?T~r?sO@j~! 
zdM}N3$r2fqGO1_@>p=C$MUll=9aTcFh@qcSur?^e-x2$cbSzftu=2~0^(>RtVW~PZ zV~D@W)9XLB7apwZ`f-<48X{HW2Y(M)Jl8>8zs2~}tyWcBh|q`>JkhOG8ToLN+WD=G znT8ZGpD1UO1ml_8Uwx#nQ@3)#9J`1LA%Y2^Rz-y%^VQn9!@wg%}QI%H&gS zJYT}C@yoEYuDmhU6e;fkn^9yxe_=njylg??JI*HQ%ckX<#jB(-Ef%0PZ+|wL7n75c z(!UBQvL%V%vRab#Pnx(Vo2$8A>BOiRv&Znqa(M$ffB1Y9!7#lzdD5`<l`>$j&`_ zsRSL2#rs55`BYPsgRowdX9Q3HY%Q9MYb%0LaW**EWy<827mSO$5*J>$J`Eu}j{BnK zFkgX0RhHP{fQiiYqGWE))m(V}#6eRLNSQb}f#%_0$P#jIu#N}pbJ9s%&8-jTJgQ@5WTC~C_ z+VG{R2t2APm=wSGc>zIaq~!DG=5}JW^H>>Mm&R7x$`>gO?3HQhP0z=PsE<4+pvdbx zxc#q{O^;_V&<-4^_g=}kI^U@&-U|&>z=r4SR_r@9-FqC@7J*}Erq zjZ02PCsA$n=9oo{hT_o1Z$&8AddiK z%}|#>Sbpurm23jMr7@Q5rDqL;lK|I8=0tSJi6bO2=*Z^uPj6{lI$T+B$%lQRC(rb-?!l2$@jSOMH>7UaUPJIO} z_eieCWoxwQjC$e}Q?<)`Vt8A4# z7si)?y`sq&&Mg;4b`K6CggkR2e}_LL|E;>tEY!%z#R3ZdksyCaBzRoZu}(0KqT^Wl zFim z=}74y`xi;i)fV}6&lg~rPAOceKj^t1YvPZFF*RJzHE~l#I*N&;JF<1nyNEvJgOl+! zn2C}py_jdgzE)xS6WG^k!J11hlb$n~f=ksya{NJ0I-A-+(6cyF<2y6Odl^r^$WmX@4IIdllX!$XY$QY(8DjliCGn^8XRDZLlAMHV^-b-h<8)Ovr?Bj9q z1g}sK#D+l%w~sPnH=qBN(iB;IPWrtrc|V)tUFoX9QPDG;9&tXzBtVlTp0bS5ZSjl4 z**rG#ig!%ch{A&ANFG*p<4Mhs$wnJs_jkbRQ&y}DvSZ~*E7tC`VBhJCDSIL55~JjH z$ht(&jX!B7bH5_f-c>@8o#cZB0)m5T$2rHhHtGBZ>n$rHTWbVLsjP}Pb5{lm1ZVc8 zC8-FJ7o#QPprkyP&c(?Z!F;?%|599XCYjqGX5o9Y7Z~`P{o{HTnZIR^;bgkci4Zw5 z03p)EKAQbWBGyr!P3laV)OPZZ#nX#v39-CtU9xP@V!#9XLd-)IbR*^a4Jolp&R4~` zJ2McYw5b+@P#X#)nXTpf|Ad*yY1v~~M)I(K*HF-&OxAba_n+WVMJd*;aE@*Z&D~;C zx1Y@yP(<6&{(NYDx#+Z^Up&k7i$xunB($&tlY|yRT<8V=%0Rx;y5zqGY>l3iaR-F_ z&659O3qnA1A*-NPE*Gw+i;QUN-wLYdrNY4P zp08SJy5%9{DgKNedM!1%^pq*orcV$2?%Alg<-S;Ry)+0`By$jDd>qku6!cC>{H?l9 ziM;BT`$py~ZIRjnJ+e-!_A_9Eo>7ci2I{@HLvBOONb#zQ@9r?B*iJ!I#>kl@bsL*g zy7qzK%+9{nKfL$R4{!|?A82d% zeP^-kEo%H(IqBc$J&yGdc7|qll&Z(1=S7YMn!UNeN+^EWk;iZuy06?uq1vi6Mt-Pe zMHZA?OwM*@_MdYF6t*PMLuu~ImL&bMaaZHwU95@6>oa1ydlu7*e9{q$dJIV8mWZ0D zL>LzZ0TL$q!RiPiAX%_#dyE&t`6h{m;*$HjOk~`b;+FHs)0^4KD=v9J6b2(IGma`z zukHxAKfpi`u9H>8{%PR$aeW$7)D#h=4}jWbuW2XtqfLA;z~KM+?mfs(ypPOcaB%#X zJAic-+vuOLs?R)`sGhu+9-S}rVfB5jzj-euRtCUMv)E%Wt$P)%`v@y7ea2J@Q5{zG 
zgtdFbFQD*E$?;4yzwG$KI8W{pn7v9Ww^vDJm`Zg^;&F{w z%Wd+fU^TOeUMY{#cR{SBhWs%w5JGli#_hx+7}u0QPztc@GfMX3m_ zM|OlFEQA|wIt&IYbl{bzXtPtVXOv9KT2PyfxC>3D1>F1$@z zmrX}DpZ)}b-p;sIe7Y_h0jNe7u^CE5WZ73|ncYh&vwKNJXlvp##pZSlhK{|-42EcX zhP@6XFb7WW{WZ};(V@EK0hHyumA!KWf-~(&ol;U@sJ=8KoFs*_f=@Ucxa3++D$WNm zxuYYXU!+n%;koCs7dV>4zZL`|U0G}&k8CB{NWE64pEaIN^gkVP>7;ac%9 z|I?+~=GSJUn*#M-2`d9P3t7mm)Nqfk!S4%Ms8|VMLCm6|H_W<|`#+U3<8?+(!Fl4hcZ8V91vcUwTd@)6n5Q|0 zUDhqbV)kWLnj)*QDn^#U#@@e-Z)}Jh9JjHTe8e_3;}rt+(9e$wR+Z!t-C4ND*b%BDX9f5l91y~vC zFo`@myvlNqhp^H}POSo;*~oUXXqJE=op6l*i-+f$1bn(+xCN2k9|$KB4WA6>baK8pmInji+6*gHL~9bo!Zf9_8*JV$jncGp>$1pH?1a_SNsQ5K0>R z<^0=3`rizkkhBT1T~oT03q;8el&FZwj>LU^(s(#y&9)0pNTy2DxJJ#kfxj%on(Z%Z z)8KE6HQRjtzGOXkVS^2_nwf^RF>(n#*Dl2BOEO4Iox7#LYoOB2S`Y%2^FtV7a%9ia|s!=a0&nptUjgR!zLX*6`c0S<$P;&|!*9X{T^3BIF^XX!mMFA(%*MrqPm zGsiD({#)V&ZC*~)Eh2$_DoN*2Y^Si4=0y%j2CG6quRuHI%$jZW+ct!qbxdOIVEQ30 zFLcXR(g}KJ6DIz#InCgW#m2@oE@XQ1i8fv8Ox}N5K;hBn+213R?0Y;*fb2d*f1&{( zyTV2nOIX>+B|wrrsaP$t#=v@30&;l$)93b|Xs5pctj){f`g;o$)1sY~b!or9OIUx~ zpZcHr^DJiLTWs#{TGn3)D;v4E-TppexAuGgxBgtL&4F?K)j~XOF^`q?Y`?#utiQ;N z|Ea&%7eSlPFEaOc59_ajl~pckx4&i^^!LpF)?X=W^QO4|egwxc(ZR|J+VAf{)?erI z_18No_hPO&Oj3Wk8Bb?IJ6op5@rM=6 z%qYA63PMP=t^G8cJE&JLL85RfI%zvkc_d8JU5I@4NS<%wT6& zt=aDqf?EHO!3Fbgs-_}H2-=W=Og>!+^y}*6_$;+9xv%Lkbrb|68Njq25^7zkec3TO z#DaaTV)>5+!AJ_$!Qp@2pDf_M?>i+*KCp|RFX;_zg9AnLKuf;Y*+XjzLeocM-#3q_vrMtK|#&7Ati)bXW-JGdn4z z>3ze~bzOIUB?aC@2!$V^Fjj7vP0=#+Q%78R`V=&NEo*$wF>~XWu*NUtjbp7x;lT_P zS&p^kC=X8asTo-7ap}PfTJ4WR87TY=YhS;Zj^tZ(h|Wk4fmPT?D~yIoDshB6=(;)9 zu7)KX3a5s@YSVQ$b4ND3n8+O$!M(iGQquzF&;@)-j;iZYWwN3tRC{WisT$~v?vRnm z!p_60ZJv~P%kLvc6@#fV8PO9VGO-6jj&$NN-ytk!x~yj1x`FPsA&x+^w+mLpLBC|i8E*{k z^GhZo*ZfsL5khuNO_Fyf3zKrV_;4Z~*MtMj(!9SzOp2H<228oHRg`>xgKObv2Ub_G zxI9=JP~=u!C&!Km1id+KO}w%-6OB;)!PN+%5rLo-$Oh{%*U7PN)qS#TL?Afp-Vs`f zKEk<0^4)7e=yuf|YIQ4aIR`4Z)iEc>j&SZ}xOb~o9%^-|J?>dguZ;Ux0-sxJU7=Ql zShEvpL-SH;Cratbu`FkLV4sKmrF7;BG_^zVfC41>l9oEDTM>BKwe2{h;iPZ}^7nLd#^rXNh z79vDE0zq)eisuCs9-=c-7wv1SXD2|Nm^GKOhbN}>V+vTm}e9hM_R^~lyRgtF3x 
zN;pJP!|pa+C-1cYwq4799PqOve*G(tR`L5m1-~CKqcOwx8RtR#C#4hw+EO45q)Ir4 zl6I_=%VqEu0-B5io!-39Omyru=71G*6XU1?HiLjK#msV$t{q+Rv2LyOWSLtjmGKB? zNb=`c5Mp5zYKl_1xt3M6-L*CwxwRsQNwtgRTgZtT@~G~UbZ#h)xlWe3wc(axXZ`H{ zSQ~D^iu>eP)t0?B8x?DV8Uyv-%wi?Ak=_I<*iC@W##SOeC!k0eYoeQUg8l4fcXh+j z+zq17MMEtJ8`?G^TPBLv2!#Obage)pE zxCc$DOcwt|FfAlqKxT;Op%9kKWo}4>@-i@U@#Dw75F@nLP%N4ehY+RAWxgz&PRnu= z%fbiGo|}J8=1(3pE5adv$>?;TmzO>P{K>!{>`p1nTt?z@ur?*frHbEtLMczsdmpYH3WO4Nt>2;! z-1-S68$)gO6(z^RWu`Ide*gAS&MNe)2CrHNN7I#!0- zvCpReJ~sa(Bg#YQGdJ6ZQ|8^i$vvH+31bC@5lOP&!v3EOQb zSo!FU@z5HCUaY<%1}%@~b@IV;Ks!7E+GpDgXo~>a#iaEQwz?CsZ`SyKVZ=%dT{Ik}jhll0e2P#joY&Mr*b zrNChkA$2)rL?{(`4W2-Pu}=zIAtEjzQ?x^yl99^$1cY2t_0ZIt{6Wb-2eE7&Zf$tph(ORI4yh^X zX>qAy_51XAz9WZ{${TU*P;Su2NTy2_U!djc@O)Z+p<5|t)N*o*uERwHD=luNj#MlV zP~?YVrPZzMDhQG%7f?z_kHd!-2y8HvFn&N82*^@VQZqD__Ff3R56`2AG`@)vq8CBS zH8juAWs!fTqL^j*L=b1wE2R{eK$(|jtd`{{ZaFuq`%=d6)1Q)A^43HlwmM7}uCZKC zg;*V(kDXhkz(ET_YL9=smkmq)Gcu*qtSw?Zwrvs2F^jDE33M|$pBZQa-C%LRnU=it zcU?D!pDAh6_4Vv7nN{~LtYDgIy8#+xSQ4oB-oc5|kUT()K(nWhR6V+Jg zoD>@D3|6)l1j#yhO_%UJ_-hZSw&8xa6fSbR$n*0B6uuuQ*ARBI@}1IkGAo4}&|uB> z?K?4}#X_tWv1;x!x*lEC*(qT5$HQ)^e5+P&TgIfjE_ov9OFR1GkEm5jrPUI>&sEyt zDl69QU}S^r8V`P|;YUnlM4}y4r*LtB&!2ePTmeO7TRJrLeOV$+jAS81df6D)#C9>u zpJ3$2wGt9Os}o;3LgWGoA##c}mFG1zl|h?wu&ze2@~4uzs-RtMOg~>&Gig^jw5!)H zK!_})e|2K*ZJUvJlXcMc9!+QSRmfbUs(^qowN}M(-fj{3XJ-^M)x|MCt7DY3=R<#; z5hDF)r`=e8lPXR9f#pq!0ONbAGnBu0PvZDi(#H06KHvEMM$YP_kao4zh7fs=HTC|~ z#HswjLc8)o7-fosj@k;aVq=_*Wu~cqHm$z~>XQS^iK6Z9M9@SETG4qy6DVkZOQZ8a zLCavEt$!*}{xqC*l`)>~kW)$aM;a$$G5g=Bq6v(etx`44^VfSi874bhdEiMab~bo_ zswv8|Fm|knH3N)foW3AFV9=R*zB&3oD40~Fruru1PgCp-+qbs~Mq24OZDCZbXiBs8 zO9<1Ja`t%(~}6)2xH3nCn85n8(yy-wP2U z(0=lPJwQFxntoETDA##*~ z^xFN2eBh;RAmcbXR$ze>+MmzATiVe#@d|7Lvo0K;g@D&#F^EPw^f@da&Pa#4+{zic zdGutQL!T@LgH}l?p_;Yv&1{6V^^_C_n!TfNZRK%(ej2WEUEbganI`UhgM07)aDSVw zic8K?Ts=GSh?+=GmDf6e$ryMI-}0CNPI@#WvwhW8!3Zv*KT?gTmTlLw6d>F*U80y7Et)Q z^Z5%s$KoNH@)=P?nygjvof4Pqv=;^T0iK<9vauWC2FJIgQYsf=by<#uF^`4hV=(a&GtFoX2~+mk 
zP|UD`l(WO|@QB1}AKOePD!GeQTXUX_Ebo+Co{XJ!((^&7x>UZrQzM8!3}->D%}yMx?T8~nujOoY|a7CTnQv{=Kd{?K)@{SeUM@%IWSe0x&5r5y>L^^_e5 z)O(`dg3cxn4>P7NrlhscVz@##$KDb~SdSZ6>OF$h#q1W{?A@yZ`3nTSjcO%j{x8%2 zt?Ry4!4nN$23|ehUyRUV3G@VF=Ea@2Q9d?A$3Wg4mtPFX0;~qOFSKt;n2%uyF5;5>WU^Tf6py5Wn(V;TZG^1f-#g z(mZXXrD`$#!(Z>|;;Z0nbyO|I4KU`t%y-d|dwArN$Zm4QsB_1!bjP!!as5Yo)>+RL zrz|YS7x!Dz#2ll$uA|!Y$oC{zCZNd6N%(2Ths)dLzluPTc@iNbsxepOM{Visz4NxFa#-#_BSLxb>b{*tuJp_YvRGdARpptbB=E_Ttd#8*$Iw*zz@Py9hhK zoOK(9+=4uA05=1JK0ojoG3eAP);`-9paiq*#m&wjXSnv* zNm+h8dl*HT+rI&n!R+VBd;U{=rD+98^7~E05=C?k9n8Ejx#%7NMb`hNM>UV!KKq5s?KPDo|J2~xj0DR3dYPN)Ch(>y`y*C5T?Z$XF_defgy zn;`XTlIFc-VG_jdjN|8i8Z|DtdmpF}ptB>*7c2;!knG~1r)}tjbbn}q)UR=xNUL25 z86WdcV5x5t)pE$3Fb<%?=u za&Er~wJ!P6hx*_^aHh4!CCgXhmOWSr;dPzAdTsLHw69*nEn#eFh0=&{%Cy~R=(`rpbd9ax#eD1ORvUFhjH)i z+O71(?J7w~zo(?3Wimw_`(p4J6MOvmGU%7!LL|E?2uaoHbg6gPDuAR!jzmmA(b`JS)G!BHEf~pC3^!_ z>X_n92}O=$O|%Y8#ICI=al2F#zZsHPWo29y>7M5BS(Yx~R&!P0!o6`-NzI)C3O{)^ z$=}YCa6N821?}Y&C2p$AQUGmSa^4B4OR7vRJFDyPoX?)K7$DP%5*g2&78O z7f8EV$4V0p1ZT~p2eVR#H%8Ho65j84?3rhA(~reU9qxS!``X4&9fN%#N0oa(Zn3X5 zZR&dZZu$_c-TV?q?3O)n#`mnjnrlEVj9hUWOu?#C?I>E6n#g+6aSssC#b3cs?>ht( z{4NiiZ$_}8xq&;@u;-3WlyAmY#w>;{PnT<|M~wu|2wA{ z^UqYo>tX)WuwIWtKc!-&5eMpLT|$?@cSyook8AHvM^bfa3ih2EKXpo#D?JzcPTQyM z!rBlW`&!3Oy{E>Ne$fo94QYeNAvLb_?hfn=EypciW2J%qPf1D7I;^=;`3jU82js=A z$tys*e+jdm!G6-&&l@EI3LkGvw9o0)PJ9xW`hW7N=l0waO4{`om^LFhw|)sWVi1(E zqd&nI+kp-9lYDG=E<126(yU>5kRP)Pf8Pr(=?0(HYV#>4H4%{1)_2D>Y%w~-tuCap?gdE9y?Rl6dZ(3n=+&tPx4F0mT@ zp4{+T9HG1Yw)5K;OLGg7;>VJwe}$xmm(#QR^1Omzq!U|nZ3yU`Ct@wv$}Zi`ladbt zr~rz9HvR7qnGN{W0Qe3nxiVJ8FF#60Sj*b=Q99BaNzSh@hDp~Y5^K7C&_7c*2b!e- zBp=dT^5t=(g>#k3JAY-fQ)C$AGp^D$p7nSi?Xiixc~aNO;oo$f%P7T&~D( zhLT_6qPdcoxh$D$LD5}N$n@b%_EIXBVD&-}zL23OVN?%06IW&u*ZxF3S-2_2NHEqA z`z3u)_=f=_+|7E|0jzhhvvHs^an?G~Yq0C2u0srD9MgiN=}i!nyUHbBE>#EpK`HQI z8(ZAG@LLpL7M{>`(v8kbDurGhBNv5>X@Qp(AX*Tie;qlYn*!UCmRkiB{wO&(`|`WaZxR+NII-NOVQ+7 zLjx5 z3PLl}R9nSDuq4^9ihqGGyxE38w^zA<3W7{;5&7LPOVz{&iqcJiSUT}QEbx>MTjw7F 
zLXq{0iFdDV@c~rv2`Id*P1hHbkqLFguQb!vf~T0&;1<#aYKk%YO<4r}NeQY+0<~?940`1>gC?XYDKw$tItG{zNDd??*p?R zufQb@cEjM8jN1*{V}Bh-xjS!e(^c`Cx=avzRhje`^>vvD@@##;>taoG+?sBZMM?}2 zax&>t9w0)dfRHM_40t6id>G_U$AEy|Y`!BvyyUvyb)BUAtn1+$fe{^idtD|XQyviv z!*J0(%JdEBrseNpNsiqtpm3c@29cz{+|9#BkQ1)<^p^?qJ<^iMKg3!57^X9Q1r?4eRQx@`u3j0RhsoJtkNwv z2?*@e68Fofgm^ESm}x}&Xh$#=W1s1e%c^+THV7o2Px^CCk;i)3?GjLANkV@&C*=m+ zUm&1pH;!mG%-o0aG_(z3kd|R}34Eo#Xs16-5$xP6Y2Dbdb6Y(>b~;%QQnN0f4|Ya& ztWCC4CTzmUyUqi zohDvskhjcXOyXRI)ha8WcK?Sk5$suA4E<`ObnM(N`9PeedKy}>R^HI+QpMd>%o6xX zTS1cH8DSjcw=P@`ZL2>?dcRsEV0#JF9q%d zYKiSo(1$%R6W0{>fZ<^u^L}7X4<8G02B0jdI^AEh1%%GM%D=pLzLL;=%`UvN4$@d31(Ts0u zy;Uo3YIUhsUNM&<>M`%Vx7NCvxT6g8Jc1%>4YnN+FAm1htIO#6sY%h zaw`+;;iq6@*SM8K6TiA|Z-R@`I(Eqe1N&5nGGLE7`{yL;Z`<(?8BviqoYP;! zG;JO9U-_53?@0u41N+|B-uHczzE7WHSn`WYCfPX8|JNpHqKG!Jc}Lv8qy!cl?*OOZ zO?PA>7Ab&-Sb~Nwc*4wvwEMlvzF*Mp_dEa6&iC;H!2L}V09IY=_n6iwfnV-vN%DsU z+v=*t+#cr#cK^Anf~#VWI_ahsf7l369)X{%AL)-a%7UG{XWxyrkru3EZHIS*>eQ@J zm9S+Bn7BvDti!7K4_c~-zt5)IU2K`nTkm&$Tby4D-7r_&FQCW{tPLz!PdDxWQy3Pm zSsf$ikx$Z-H?ER(+r~_!X6>l}0MfA{Ub!*TxRea;xiJ%w9rp<+T+En_y`vMao3ogN z|CZ|n6yB1||4MT&Ns|A?$!iL0#g3n4BKzfV5i0@TG!|W>6jDJYMAt}9qkok61*~?$ zif!@iY((xIZ$YHGiv^J-H^!oIvarfe2ndlY=AYF`-~kIFhmKefiS$XVbt=VTwg9eR zb|~Vvv$7F6R*ewJ-=OPcYLMR7-*3}N@n52Xm|mMRkt(8DkWf#?n(fa1>9Hs^aQVA; z-B~U{X0dP$_xFrDED*g$s&>nn{(8y3n_12*+>Q{UYb`v(?mJdswl;%-{e>0T^nNpI zOpSQ?>}+JI5ig=YShc;hKGO(GmsJISm&!#2!N`?d(w@G>Z^4zJXzqm1UovEXdN7I- zMc7$az7wmq8*a~pxZ;=MTCv~lnW!W5{=qDer{rKwoY{wtQ@r4I=AFx(NVUOdZ-X=L z{yyo5EV-BRIjT#RAxbCK#3Az&Dfr+o0D(Fb3Md>((%(sQSD-}uMpgWJ6YQ-9`qRg@ zds8NIhGv_>Xh6i#%mdKlgd~)`{znKLnP~)4d-MmWIHJ&05j3h7{;2C@1Z!rJSpf<8 z%Ke`M;3KXTP#B+EUSF3a-$)2a&M&8h95Wo0zhgsV!DEu^I$9tP?aUtOB zUz=gE#|{)w_@eXXGk8QujGrY{i>qg3fi!amml9)ke3-HUj&|rix(tJvkSpWwDQ~Eu`v140) z;vropzWut+`iC&Gup;0hr7m8^r#_GD{2E#ud0irWzkO>~^St#fR9h-1!ngBVGkl1_ z_sNZk@X_y8mz>Mtt2?Oc+fg8KaOXA%GdI} zIIMtfvmw*7k!rhfdNy*_&2srp<;}W&T^czsYibIVr|K7iiLYvNl zQ5!-uB^3Emsf*Mop~yONP5<`eAJ0mH=V&!=^ 
z8koLxtYv)y-(||=A`XonRNb<*cPLPgHJ6+xymg+7^s=-#LdBa zh_YeMMoDAY*?N5SAwAg!*3lp5*OCvmwVJyVs~$SlvmB7vP5lfScAoL3U_FG@9v`fs zr}aKh4pujUo<6o_C%(KU6X~?%3B?Ci-0I-RKFmaJReYTOI73r!cWcng5I@4| zpj}wm$od@YV)brf^`tqux~@mMV@l&@apo0p z3e}1wbQ*Mhw()7vnoKm$LY6E*NUBbi{Xy?0qm5$^3W&^D0Ime^V*i6?Nin(aDglMd z&VAk&q$bjT%A;#ljI5_f^DYw*QbJ(HtCq^8&Mn?clsbBa#>b@(mr^2BBt2JC+IUhsQ_0KbGa@lvxd8>HMBf51#Ivl-hmMNYiy3_bvqhNZYXv>$2>ORCkC)hdNr z8z`Z?>nmL+MPKMi>0aG3R+n(vo{QBNW3>I*&!P3p1}4(Z_1~C5ee@fscF)C$wDZ|- z%(QdcKB%T%mq2M7Z6bX1drIFqd=?5{E`^Wxf7l$r{aX6P4Rx^8 z9s39<{Lz_q?$62ju_$6{m=creS1BPIvLmFH%6*+%ym^d}b1wbaXfD)^*amiM8rToZYdtFuLAXnDS#` z-_bHUm=Zde64fP7lr|hnmwfBl{b6QT=2V%_L$yApxa9%#($75V7v@9A*NMJXK?;=f z3(;7qddW5f843bdS(sN;#LZ|wh;tyr|3RWT_r`qVs^a9;@Z_ zWUf_?gSKEhbf9|V$Bs)Me#pslZING>0`GD3%3%PddONG@K~<_-JmYbo*)sx!r=Ct2eJM2_FbYid>7Iag+*z0-K_r`OzD;G#t)kBP06gRWF|ZK@1HoN0p@ z{y4&arc3eg0|FvLjoKnz4iAd{G2H!g48{?K)v@640W2UxY0 zUAFr25A4iIk$teNg^?c!Y!Bg<^TIdjx?WR32WP2)Tz?khnikolO+D*tvqFPUUmHdm^biMLSsvRN8?`zZb0FXd$zep#e((7^e#lm*)iw<(b4j?s6 zeG^H|LthwEL}Bg9xTKV>iG=)yFU<6QGCN_f?Vm`<>G!6Crm$Jp$(=hXz1zV>3+>() z9pvTh&|=Rd`2P91Az7gfPG_vqxa>suUjEz+Un4s<`~`{d(eF(>&$1?6Cm;Vy*Ny%k zxjvEi{vrgV_+^>ifBrc4{bya${l_$1->D8}p-tr}iya|vH_QTUTbb~J-p|O9Uv)k5 zg|_4@WqVIsrtObg-Mr}P+D)sQL&i1$^e3-KoV|Iw&9moZ%XV2-;_Qv()tD9Tv+P(J zgf&Ah)fadQok-g-SOWMna z91ov23BFRJ<)wKLyQFnIhj;3xM0jf}TDfPsY7^tY&BC=6hSz!wx`H72*(IRJF*2{G zfWlMscKEL~=h0u7`l0U+GlZj zpeZYJZBcp(-D={?Z_>NFDsEks$qk@Y+xx4SkiVp$Inn{^Td>lKom(XBuS`zRq)ttz zXZ#absa1{-fwUIKUDAic(v3UicBy(`_wiY%$p5=HT_2X~6R)M+L_Wq4WKA&+`wjt% z$?KZTuplPlV-eP9Ewx;BupT07u~N(a3{TW`JvxH@Z?F*zd4t9O{SylXWGi?h6B(g? 
zu6!dCLjB-C&|9O5Z@-a=6qlTfHQVmjxtQg;Jdm2?27=NeFh~>qZ!jaiS76xUs+FJ* zcy?tbuzm%+z^J9c}JMf&U^}FVdijmy8EH z(F}-SwUA?r5h0U@vn;}z_`-OukUQH0tPJNG&&TMq5y7%3vUX!{Sn+|cKAw&uuaG~L z!*;2C-DEJka;^oD=PzxG@&sRFO?>0^OyoznR$TUaCZKIt1mZbBhR{t!+{DE!PMk;# ze%c7c;Nw3DDBLe8zf;_nsjaYE5TZ+QE_=a2N%HJO0RVRBAU{@ZXCKT4yN4zVLL~Jl zWinFbDwo_9Yc}yUzM4C7>6U1CFdM11&)En57cB_Yid!DchLO@*9mX2EFU2M2)r#|9 z%j6sTx!0I+#m!7(@{d;y`=>wgIC@gGTkaa^sD$7gt#ZpWbEn`0qE2r9uUZt>m(p1}Z?5{}xtiRq{#5u5$j%di9kq#qznm0qLUIM&pv&V{S zhj1Gzo^dXkopH(>6DEKOYz2>Mk`q};Ue7iDRtcWY{SDZD6$ib_W*2MXU}HX?>(Qo% zSG4K65(+=krt3ASG9vFUVFwtq864?AD4fA`CQHpR55Tl}+kF1MHQtoILEN{ z?#@j}rK(h!z+EA#DNM%q49mIW_Z#}UkQ8X*3vY_h6U|*f!h;YZzvc*tyw8=acIHNq zbrn4roK2qDTx|o{B?I-|QE-;LXop}d-s`Zl!7GtDaiLPMTDga_^8`PP46hP6_V)0j zEwa59LVm%px6(V}2CQ1J(qP18)Uy8ZMkb0ZA=e`3S_{kg zP@J|l z8|~4bp2B(7qlnwHYPK&aL8bT3`IPpo{vILXTM?W45-@LbGLrH1%N)gr%h`#xFhi+|Kdd2Ex@?Jr}X#@;CXc= zjILSz_68f$hC=ga45Il4>TBu(Y!}(V>%y?@Uek_{_9k8Z&6g*V&An8~zvMoI)I1k_ zs?GS+rVK#Cz-OIEbxeo0@7J-m@y48l5avy`+b{rj+2?aCuK_E3=R5G4hW<9BZB2?7 z$`lq=rkq!Pqg$87=dr}JrZVa(U~Ye|QR|q&sb>b+!Ng~&4DzduaQ5~)GLYURIeI_q zM6#py4|F43%%W1JVTu2V)+zh#X>bVo9xju`QuoPd3f6aG&!-CpVtM#+>^bB=66#(A zl45a9&GVNa6g{YZKN}>SQ_nb2G!@H5>10nEfu(wCe>!kf*sz9x8gkxORCxmy;c{^r zmTh`-ke|5@1Tvq9-3OwZWq!&!Q`)-s8DoFW=!_ktV-I7Qzy4__i-40Imp@GfQE!4E zx;S1C4ao14Q3Z7_a6ibTqur5sfpi-vZiG5Mbh`_MB7q!zR2E2}zxJidJuj#wpgi|z zcO?20=m7xY?#DP3J1ZAI!~sV0qaR|aVYEA<-K*=S+}HqeW92h+&Mt#B&QD(jQ!;54 zksHZ0Y$c*%1{r@SMSg~ioQQ#6A*fic@I`AGk=mVv0{(Uf$cmG21K;Zza|6vXfP$`0 zJwM=}hUdzPCPFSgh z_pYUWmebS|*V&MEKaKYM$f|0Gq|@s+F!@rp+GYBZrYDW288=&+E+I`bp-EK-a!8wf zHX;5U`1y-epBjC%~;{@TfF2Ab*@#}~11>;|S38lrDcX(txR7OEDFed~xBA^!UIxsa za(y{lxIKdi5IwL|OKwDWfvVX3cR>uvcUQI1@0naB)OKMby z4z(fe5oiz>ATdwwEhpwytNVg$veBuExo>+xCzP%izG(o82|5Hwpe-I%ALfhlk ztim~1sm+D?TLw-B;Z|WK7v#Sw&mefLtuQq6SaH?-J_F&Jo-nFs_9LWb955-17B0Vc zkTvcWDo<9|Q&Aq<>2GB@sv>wuCvts|{D;%kXB{@ARxCHvZ8E67feCHuhG?EZDxR$T zk8UPO+qBZjGVL1kh-E%Hh}IVz1j>mygP3O+^(Kal1JxrTiy#<~ek-yeP3_cm_5GD` 
z9^LWs@a%YZWp@NCj-9t;pc*b5nRA6yEIQpyQ75DvxP{hEPFFA|7mH3${VX-+xeN&t zxfBcbGYLT`uB*xkW^Lrq(4wZ3ooSca)Z2zMzurZD4Pj9|muPQc!LI5xXAJq+sEV78 zkUG9Fe#RYVcl2O>rHW$;q?z@J1++X~0hGrZM{er%Si)XNh#ssz4EZ^Y+?<4O6=5tF zd9hS97t2KjSSqTd*Xk-lX<(4zc25Sz8?rT%Zw6y>c3lb-O%FQwStlrOjbE(Fa`yd0>KUBu7F=U|3O&>lb#gEw42E! za#jmUqdGO;hO}~>X^7SGe6sAfg56a8A^L#dGuPFY^TLgcG~5g3Q> z1((_kMosDI!7}#kMox|Qzd+VZK4*K`sQ+M$Us;Ub?nrI1C}7!f8R;f?niE3R>L!ahp#%O=WOkKdDP8x6z=1suL4w*2U-ncxhC@GJhrQRgCqjmFh|M&s?snd?YMYnNfN{ zefpHKHkI1F$RMTyEWs}lz~Gp zk+ps*W&H$uc3Kjf?Bu^GrKjO<4S*rNI`!x$SmCRz!Osx>Ht1!LBaim5A?^M1`r8gm z=5KZUpb2yz+l;7-Z$kPRzG4xJCjAPglN`FKX%luQLI`{SvMFs;cmZjg}wQ zq1XUHWb-i{+?!8;0AFW77AE%bt5u@|4)D9y19G3T+mQD6^ZVb;DNkH}SaDo2F@v(n zqr!$h!pizk6XcUS@KB=qk?YW{>6>D1D4BK}imvK%KHE^DzYA9Q#>Gs~T-vQu*ZF@h zb|Urj3abA#nw<7dU4vxgF;T>h8>%XdG#~`%qYguV8IAj&|ZXj)6I{|8L z`v+kf`_ErIA%mUl9DM;cO;lg)ZX|cyw3F{O0Wym4X%jN&iAx`EK5=+Fd*YGqARw#b z+1pOp;djLN{T|<`N2^rS-9~eqo}1pQ&aa=OM@%BB4~P9|IX!N)3gJGG%TjUz6A;2b zF#1l9UaRi=M`uh+8Pg!bfu*5(O8n1erfVefzszvomTKt$K6uqblusBCbGlg|AP|M> zcG5&O+THrFvK-TWq@fyHUMw$9XWumezBHx_9ey+FQWt7bp(*?&b_;DnaL10nu|sTb zPfHWjLU_$+n&V2;YAuB(wSu=HZEB}+zKw0SJF5tOxozj3KYLPsL_!x5L(^L+Mr;z{ zPTe_E$?W(>?lzj{ z4Nf%Cje#aLJ;jE!10CmE?}uzj{Lu6>czA}1XMa!G(Ov^KrEC*mCt<|uyf`i_J4TJ6 zMz?u3#Kh@vlmAAnEU)ZKCmj9({r3a_xj7ve$o24J`P{y-dn>nwT7^)5XmCA6);a;~ z0Bv;IVkeTbdoFe&&we5J4ad^0gd#%tLjbt0h5%eu<3!y4aBCo&>@BWKYMkhOwzmwV z{fFmJnPb}`L%)uQfHJ=ZqF8+Jwanx!^Y_;<_0L^3G4+pRJb9GsjL#={V-0J0$wt*b zu#3xUsOrx@uq!Q;5MB?jk{6Ky1Rj$4>5H65cAXlXf%ILTt${Qw^S!Br5x5jsTl)F~*0!HR@OVz=Q1sc|yU)Ge z)}BY72MRn$1qQ359O!-pgyMO23E^Vq0g`Lc@`YDYJr*1!HcVvt8M+CEkaZjU4$;ZX zJ>NEnWr@6AmX@JdtQZ$4Vr_I&OcT{XSsmNEFxi$DNpCtFyR zhYULH@4BvzS)bqy4%O;%O>c0g#Tz_RT|@gC%qO}@Mc&leHxe3~^(KJ*@kp;27`RET zVPI4-`0?uiwxVa^W=x*9xJf&QDDoU(TDl#f_`= z4FLP+FNxzj{f2dXSJLqv>XA4;^1Q{(?8K|OuJ$J5WADc~iTh0cTDD`nV2vUG2W=NL zsRz$;NGmyKyqB)@?di)HBwCwZxiwTQP zqI1f_vZJCJd__Wa?2Anq$e&FmHM17iYgHC;V5a(I2{N5H(y`(gQ^l-fz1vwZx4GlA zRWXiwq>6IXi?G7?uVQ?3;h7Fy_54b)VTz-TM)U8^cIeU1u)=>}l{vBq62#REKg|~> 
zN)J7tNo{TC5V-4}4?j1b=e!`Xp!8hWAb_RX*dnp3<;})gZK!&lu@CBKjN)A_h+LN+ zY7#<_go)O2r0Te8Z=5=AJ@owqxNy1dg|@l*SiU_!+!}ZkOBMN8F3Fc&2Of7KSxVy& z^0WkQ7U~@L7rMxl4Zt$4B(h2`p{}+8Y-WLr)i*1Qv<61)ANdp)=QdH>XmG`{KX54e ziR2X3KfTNNWAr&@>i$m_HFeJ=^u9NX1$mjD!)yrp$V_bjfw=WFhqUFLaq*2xv3Qu~ z7hc>8@(a^4S#OBIc@*OcQf?@r*rK!;a`J=kFP0sv9|hyG^e9a}8B`%kICU#M%*wRb9zjw`u+3&Nz z1)&nf%lV{TIM|jdC@-J{W4pX2hwNt?$Ob8GApc8;>c8#dJ|mrSAFPjdcb||t>yM~`loc$ww<*+DR3V(XkNvJE-rzb)sQbcMjky_lXr%r@*A#}0_LQ*6AJ|(qc zPrDHOst3(UlrKA1H57{llhR}&35?^65+zUzg~lj& z_6driCWRcvl~{H*f|tPOcbUR@-~+7isq@*I|M)jZjX4;8vndHj{1>oejdOnNSd%`V z9cvVR$2|7<;8Pvu_~5v6F;A#r#to2&FfMKOM`p6@8gm^WIb{1@hYB)=8rYcvL-FP9537JdYfn`VFdUBfa zFASmBApDhBsfE)FhvnKzV`TS0d7~1V5vvq_)Y8)oy+iAK}+-@FVw9uZk@Fuu%BiEL z8JbF|SS(e)7-Ym1IW|7xGz=)Eu$-!?zT<^Q7#-Yy3|8>Rk95so$n(Z3e(MpF!Q5j! z{_*iR1{3Iq&9Is&-PpMfer+9r45c))<)?{iVK?KZl$^B!HG++Q%`kBFxs1X6xBbNE z%+xFAm2;5(BUMyCy#p{TpMlRgJjaQ&8E16e9no^m7{||yGi<{Qy23wX#^lQ?mIVOG*LZ$|58de<6IgIzsJ4SimwEz003V$fTCgG-I9Z73Md-JTW z z+iv`Kyj|DbP1+Xo#@p?>9*@`T(E-YKH1({m%h}fkoJbxvIN(Gr-{u|jd@MZi;aSVx zdjTBcjittJc57#z$d}!#o0@zfbn{s%&Q@4S@gK~5UeOh10y_XRy|0OpV#R+)i_w{h zlfRS!DJuMBel~MI{cKr2Rtov{HVcJJ!HVPfY(^osAE6X71-|8OAlLsg64qY37}}GY zvn*r0p0@$d0tD`F*L8Jnd8f`&kq5GqnKlDmM<&4N1Ns&SBAl0;TZcnI3EtdQhhWR8xCbL`fnd^R9BFzfH{CyX*d$tgB5;8 zC0n;cf251h>u!pQSjm2|(uqPTSaLm6=|npJY$a3%Bsd()(F3PMr3-(6Da07(@v#yS zTVf1{oTpRk4c$@$jDjv}M`0xh9R5tKEZ>A;MFzMTOrm zlZ`zy%Cr*Ei|C==q76mrWB<{aflY73kOIo_-b^A11zNGiAj{6@?ylH<8OE7S3i7;{t=Qyc!f_ zL^!)or;l{^P#WcXcbrEzK-J<^Ihz=~4+_XnmVNuPAGqhDBV0dgVBxmhYr1~ku?~FLVj0f*|T!6;utZ5?OE&94r3I>VvhC_IV(8S z!PK<0)T}6D=~r=;xie7A+4PnNncbIWK?YxoO|dmoEZWuP4{1O73G&}q=J>5;camM) zQF0~FK=^D2h<@EWbzKdySm0dk%MM*vcSMb!&ykmC5y(s4{xk4^S{87nnbD zQEvI3L(z|R?>-;@+?wQX6sP~o6zUv3ub|kQ%{4oMxf5c9G zdhce)6zZ1K>IPCy3VTnjQ&iu+i9_0SLe1ZQ$06r~QV_qvO`c9SINZeshZVkXIs__6uA$S(Hhb-K@GSpw z8tZh#cOAO==xN>RY@j&M^w@^@>og~d)>$;E=T0xcJqe3><&TLgHuou=4V}Dm{|W%x z`FWz`&SEd#JIN#nbIMV#I24l=$uvaTENPdjXR0!q=ZejM1=UEgp>o1frN 
z9?!oDlHvj1xD3`)W^7>BGlvAm#iAoQ6#ZIF{f4ZQ|K8u^r2c+98`od2QPAHzUw75t z^-2Au(f$UKeeovBuhmx$ah6y`RV(69^pJWt>GAD^^>;jJ{UzH;X+7O7Hq-N#$b-Mu zS!r(PW1M-KNH?@I+Swf~TR{ z%%{*${Wkko9=7Civx|db|IGD=(b0VCk3b0&V zNrktCgIS%{$Z_FzeoS5lI*x_G5v&YL$;$vw?QXaMfKkk)+``x@^RfI-;9lLZ@<@KD zoBJe|9?2&Sgt0A(F}6i?!y1(@zujh|Bi?K*(T1vDZ3cozS&xI1ndUfA%#0$a^}ojP zEgG;uPLXyn{wqa{-+aZPXsbJ--et2PpVZ)vsJFp?pQ#h!`lUOfj<(s5_H8GTZ&Z)X zM&?I*g8sb1d@P3mjih&-@IZ&IOMcO*b_>JO?%v)jK_am-@yvMm=*L@d&Ps3vosL@3Fcdyu3b)#8EcIf?Azb< z6ZIXsuI;1R>o<+=-#7>GN&*ZOhUHQ__Usa#nA)l9+JZ#;ce1k_RR<`=w7{rcT^r{R z7r#H;nUsIVH-(8J{q}e0GVi(zyuQeGF~n$DbD+br0fBksmnp2I*v|$~@mC#sv`NiA zz@eyouKiP31jg!f>uXGw1NG(|Z{5j`V{`VA!96&I3~piJCaNCxiS{ii)K97`4V>xx zmk4$Izzbo=8>JUz$-HL@b6?1x5_6cVxRVX~qx~K6zGuCS&j}pp(4(KjyZ4WvGe#ZK zKNv!D#n_|Isrmakq+JlN;WbI?Q7l}bfc(K&$rg8UGzS@0CX=2aQ%*UhPN?2hO85fx zepbIUs(zXhcZw--pZR+dqnZ5xRvalG8%##=kr((Vz2=fH$ppjJa?-8Bk1b?NrJB8F zib3GhD1lFJH{TGq^jT0bT|cuHM>m>F$)@YNHa0o`(VVg*dFiVQfO?$j#>){>C;Tf; z_F8%acW#a8KZbKC+NfTzmqXDtR(d$s;nd3S39giKF`o>#=4aReOt+Lc?S)UwR8`JM z>}!;z1EpA1qakP1Ib7s3ngRlZ45&}{u^GIYOhCBF-v`U3`SR^q@(Mz0o<05p>hWK= zIL{|os&bKuVTEsIY1h<0{fmp^{Zps{%s&V>cqe?YWf?z9 z8y|~c9}vGQTfZmHV<>t^mIzkmo3lJELUzS(roSjf`*s_Q>zgVQdfEt8EF*=#vY zjv`6Unv{BF*PVAbQFXhn>yHZ|NY`Qs1rE3RFOesu1<%+51A$o@KF5>NnA5S$CKt=g z=$=~@+lhjdI(Nvx5jf-K0ft+Jp31j)M>&;3rH)AY;xNg0xO!9_Our$_dWyZ02 z^bZi~93Seus5hP%oZ?&?mB>Lew_Q&^=_D8JnuT-#I zX=W>!zMCuJLj@ugF_ZTP6{h0~jK<$nc6e_Gk*NqP{2z?2w(sePnP=!hv*n^L%`+Cm z4($CsE1amNTBWP)XQRL3&pY(!U)1b99MW>noj<(({QA0d79>c{lPPibg%@A>zf|N* zy^W3ZiWWekd|Nyc=iL@V;v=`kAu&@3y#@)cPTdBGoQD>QQwL>=#MWf7p3CmT~ zEI{^>{vYeQy5a}jq6bu`8abV@>D`|5rLIR?q1VwDB9zc-BdyQD6wJJhiC+&Hqu61L zf|d3pSZt_{>8V`r+zR0f{;RRF7W7mpP@?fks;6@N`BtiO%3ge{6Y2btTb)SDpvag- znrr~=xbIdciZ-j)wQ%Q%*IkB^>{$xm_6kd!@D?-V*ooX5Zdw4$0$Y|mApyvb)&Nd_ zWGx-o!Q=+2ZeYs#+}J5)(q|^0k4ur&;LRk{8GS~rZRTiZprrf&rqZ2>L+RvC4p#Ub zJ2*oMHLI5@M)gv1hA>7`z(!-phr2)J;@DCGem+eF*GcMB_nv_DGMqW1l}eMv!3kmlh>7X~=C4Bi2$s>(h_q1q 
zcZ9VWc~6D#7N(3Qv@;k_ZNkC&91x)Lc0o=#c`*zi+$1c$2Yl)~3_=*aSdTRNgH<}lM%<{=US@c;()U^ASSb#a+Sw+)OgS62t zPA9$Z1-kO5V7ay#9KDw9B^OUiV9@4t=*zYKW*VR0lU7q(25`apK3J;VOI`culjQ`- zPlAqBuWAN=GRJ^EuuDAx?)I(}ath;JT&sk_tv zRx)QUuHH^&c$=E@iLOWA$MO?^Us|QjDXQ8Ph8$0Z3weuJjnaunIjB+kN+(SSBJEX& z!>vZG7;ZU5EU%<~7*MiwY7;>r5MH|aa2u#6I{VUSXvhX_d;pfE2*`{QOY+X zajcF3@STku<4JNl*<_-lL{&GD?VH(6%tRdRrt_O7I+0up51h^s7Jo)QzDa@` zSgxh-TAZGNu)K!8e*@(GX=V9(q}4DuNUj$s40Xe@>yPjrFbv7AG8kr{2bLT+O?0B@ z608&!0>D-*HL4?j=DEWH!Zy@xKXFLgVXQjP8RowZ8zan^(8nx2t^JAWw92hi1w~0G z-bd}fO&rorbaa&;YX_HTPtY{-eJ0iwD26XPvL;Zw+C14cKrm4rZVlXmrEGn?6X{j( z2ydGJvB8G%O|uM*%a@YOEB#G2Uququ$JJKZ|9@v*YP7 zu9ICWdRvE)hNbMP@lKkU1}laB*z=Fl=Z)I;W5+Xumo-wKf&zn#Dg6>^xt)1&q|CegxvS|&Hvrjq5(4N>ti_8Ckt$`q216Z~ZIai?S zoVcK7J2@|Z$RW#_O)<<)DLXk@l#V>lQ>kiXdUUh8_D2pyUr?`U;E;CVf2(gKa*6rS zu)-Jk*oQs0gGsGq^2`<%*BvNMALr^!EoD#FMwJ;7ZGAX@zvr#3TZ%1^kV+OVDBhAP9f z7X6&by?jX?Q%SUKGmfpR67*-u^>?>hi!F@+d+go76jF9@$@O;!;^0qqEDaX1GFVg( z!>|X7IJn)uO>N!Ip=k5}fB3)skMOhpNBEHm@JdKji8KKrQ8TDqzZ$&4-Vm4BXh;;f z!qFlmP9gvH35k{LT4f{R*qRR= zs03GmgY8~H8Em7GVpOvWVpIJ$DIhhe&wv^-1cSzMExfLQqo-JM+>n=11LmZ;Se{jh zgQvYh@N3%N?kV&zCS|eW-i8QA6F4*!i(vc3N>K!#+Dl;b4K+Av#30vQa3#mY$Z$kh zx&hwBFTAZEcmh8Uc&`^0m4R%+FRbhV-sV$a0FKX~fa79unhhaV*LNjtlbt=WvJM(T zDOm0cX}8LWNW7AL=~_q)6Fxw74W!d58oKlZbQ%(C;U~K-r`oZy81B)uJUVD9rh&Fk zc8u%}!5=qbBKg$3PtAN~rA`(rznBsgAYOr^qN%GQNb(XqTysN*9!HTuphz9djJ&J2 z6KdZkuP{=V06UZ@g=2-^FVb<(W#g{2jC*e4xV2G~Jzh1l2g97a60i-W(zkdac#y0% zX|KCUJJq4ePD(ZE43rk5w_W#dO2(jGdKxM9RA*JQqc2Umle+!3-mBRJTWvtR;N9K}7z9_p+o z`{WPg?juE5aa|3}O=wW7~k}si}fdVrFRlbpd3KH@es+t^Z6lKj}M3jpaeg>17 zV)jsA0yZ|;nc%Zl>*>(7d3qQAiPMtLDm0k?x1-3^qxi3pz3B?>2%hF15q1`f`CLql z4z>yYpMq_D{6CNeOQ3E7ocimB9Ev`%J3hX)k!~i}_k_)a`dhz2Lm(7-mHC-A*fAEj z_drNqEJmnKW=XUMWOeikb*=%N3xcg4?}3my|F~h*aZDe@Qn60z0X5aUcaOpi*9?Ce(oO~?TQH#$>Ar!Tf zkDraBtJJ(S7S192#7Zm`k$Ld3A(79}DuMCL2gTBZRZQ17>dTm}(I;U$i)hCURgfWx z{+r$#AqYQqy<-Et59&>Dy*jfuWoh+PrX+XWtb4xc(bER#v7rgJV&kJC@4#Rz%-tx{q59f$k&vI;g?0Qfrnj zACN=P!M`>f5_mo{oOJZ%#?@7h4m|-YSe`6aYKuBeLG+u0 
zR1m#J*LA5;8$qN+e!bU;mDDyW(Yr+W!q z{F92^g?D(JP($JrmWECA!u*$#OZa5q6|4*^Ce7WjRCt5e3CFIDDx7sr5^1&Y!nm%| zEnTJ3u6Wi}I#!0AzL9lx^hPJr`QtY_5pD>Q?x<9-5yHfj_i1?SNg-?}a?7xhx(mFU zcEj!$BvWb%rYIF5)%~3%pj4?P03q&N^sy3|^g#&Y9#AIK0%0^oyF(FksTl`P^MV2y z8$knG|23qtn}(HjcB+>(0+%?1)D7PnaZyT|*9-+~DBfid)hxMEINlo5W5v^vrGxTq&J9~(V|BxSDeWpie>?<3**w9LTvxNgws<(#C=>z9XeD=KOnVxD!QRGRGoWqXbe0a!C8V zuIs9lX_+@Nk(udSV(%tS1Q0MB;|m~GKKvb*kfs4t$#JG+A@m*)`yP3g-Lq3rF6R(x z7@0=Ur#c7ovZaQ;|%c!N3B zo&IgK6ypb=k!={0M#*!ysLsAPmSErySms~9f!ga_W&%XV>KmLW$gjBpvi>v6iBjez zvr^>6N>MsZdpQ?NMd>lRErzO>^h>oOU5^WAR43=qgT~zGCs=X(v4D!qKqkv`%}K+G zU%!^- z8C&!@EUgryj8B6%&0M;JAU254KkWox6=P7wN8Q`0>*{PLhob+%(jLh9ffasjyNf=Z z_YN39Z@$*t*zwdBvb)sKFcDY&wcw>b-FRU2yB#`vhxQqb7#VN0eYFufJfpCAAie3==R)J~NHdd@F6HClI zAZI@@40zB3@UFt`*Y^NAVjY?18_7=|pG=E^<*Uk9ln0k$iT4bnwF4@!#19+hL^A(e zyNd{`PBR!(;?ssXk(N&tKL{S6O=|EF%_nu8u%st@aY*x@)OB@=2PF5~ z>18XujPo!}{J!@r>QqR+hsppVj{;soFR}rTniHK}n@a&=%|;GsFaFk5zOmmY$KOzG z-{|C6P)Pk+g0WOP1x_}6{YfCVo))du)x9_rJ)#cU$f4+;cPG71STr?B-`k~2hO)+B zHsK5)d+MG+es_c4Hlw|5g~;`_UFON4^`P5}GIUo2n{ zPxcW5qYf{}D46>9q<8}cMp3`zkGih@Ym%<35&HV;TP+Vj;;d5>*qq2L#l6p4z~H3sU|_V! 
z%10EeI0+1%Hz(Hq2LN00ngy&E%s&I8j_(iC-fEM;Rx>bqv>Z#p9!vsz_)aUCJT+O@ z)xTHhy1M38c;H_xiDc63V`QT2urxs|`Mc1hE_{na+P-t96NQ@|ABYvcn*lZBtvIbVk))%C)b4Mci$C0&nn<5Q9gj~?hx4oH zHg_HSc@bz#gC_`27HYb~uV=4;L(*T!cyVvfDG zHz^S@cAz+pxMMQ$w{9oWZU+8kLUi??!q5C9ae=&5!WPJkq^6b76t@M&nZI+6S)PCb zfOi|6EwpA|zLqqNfhMV3%)$yk@EDuUvNwzfLb+-+o2z_v+|AH`t~J~ZLQ!VjVP#D( z-VR{GONm@!+#PYXk{=`-qNZHe)mgOTJ0gj^;8hF*Ck&l1Q)#m3Bs^WMFM|g*?@1hq zI*FEx##8A#GU-@edffsDS*Jgm#G-8&;1Rl^{n5HnLZ$sxlZ&W-Le2cEDE9h_^`O5Ohh2)W@F~I9qyv89-NmP&~@SlHm<&T9Ge%S9ALp%FQ zhpzKuf9FJM5oF=v2O3S)uMv(|a}4;=lKG^k1|I#=7geoVjW(gH+#H?Z(+8;v&fVm0 z#nQ*_lhM9%xhP_3FP1*`Njs?`Gu$ME2c;sk#R+5aoBJ;KpZUbn>h|tsZtll=14?7c5t&FXorxtiAV+T?_C`S9UVu_$82^|!02Zd&c} zDk!@|QvpmNWLM^qPa)({A@jYCz}!Da_6r^v9#B5&ZIw^nQsIN$rM5%Yp(kx2 zJ;T+rY)tT8s_^%-%&2PMkddif!7LXS?T!pqIsu9Yva)m8l>Tddhu*;7k<04D$-IxW zSa%=(dIR?K7I^j&B|nyrJ%{|grAhg;UYRthAo?O9 zxm?LFI$?P~CdBM$BW3md*Jx0 zjiFTfyvDxgvllybV-8b(V|$hC6La0IEQl-;iueOU9sjQ%;H>L|fRQv&9sO5B7N@qR zOk=`7_!VAJXuM+g-wbzf`h8I4EJyngh%QZ37uz`$ll>n$OW8v%O`(z*eSv*ix#)#V zD8Wun2nP{GU#2`O2YU|s`$&^=4D{t{cLQ}#%Ei^&C@#p>;W@F;PS)vYXuR2r!>#_l z_h31TNjG=oW6z#|9n7b2V=eUuIN<+@a8S=4|93ls5gMa53S2#-duYk;-SB4ZC(1)c ztL(e|FR<^9_vlvBUf_^cW2uiU^DUQx%)a$fTF>09OnC>EhqlNI(}Fq|_zBB%$Vndt zJ)8~t@qFw#73k(Z+2cg?V;1j>!@A#&G(Hp5dL?PQ%ui$M)6yK|?B4Ke9n-oFd+!GqD5|MOtFVzgd1YY>bU-zv zY8D2(Ph63BL>*OT94cAjTb$QO$Zykup>@L>p!UAt(@oCT6QQ$*)+g#a zPL}HAOH#gPvwJxOT=Yz$^!Z6C)t{OL#C{qNkbhfcf$TvbUnsR`TA48S*#rO{^@;@; z>fX*_z=qme{WI4168o+V?ikc*MP}l1(t{I z$MQmNvApOok5Dm@SBm8&-@*SrF)I`iLI9;eTEEqxchQB=gTPKdqDrdyVkw@w2%O*; zMadJg$iRSpZUy}(!M*&)G|<7TXGg>5Z(D5n{1o!}mrkVg0U7?(-&1`0{+u{XXD3jw zxd3*d55T@(lQ{4HnvgK>zm5aQUVllPpw}lPOyw>Ld2P}NtJ$k*GR}?k)mJ~AIFFMi z&;yU`6GcMesiOJ_?ee=-iLh5sNSO0K_;g*JHb&RgTSn`;Hk7c(h6b`BXiGbFUH!0F z*VVS0bX~n>w1Ka1#DBN?VsX-2xpBI#rU4k=aiJ4oy^*>oc^ZZA{tjJNXBNY_vSiD+ zzyo2`cwJXpim9kkp}k3fM^fO2pR@qO7BigyllNJMS573@taExau;FF6J*%8*fQe<+e-jaUz`Z}z_pxD1l0p4zQ$_VPN_X>?C9;BJV<}VyfeKNmN0XpF z9;;i_6dhRCRXbTkCyMB3fG*`Ks5>8y`0vDWEsertetfH0_B{Uxxx`Myl&i43!blqA 
zr6r*b_|vfb4A{2%5V@ma8?0C!{@)5wUM--HivANT{Hur9uH{`r#8>SKj;@RXxq2C` zeVU2o<@6ORS%T3G6lw1;CN5EIQxlc?iWk0pdQBn`e^!!!@y;;-`PkCTGM^1Or34OdKahcpyLMH)3?{n!WzF3ktii;Q+)lZiZHaK~tuB&6b z^!^kg-O3L8K*+P-V<1&|j>B?VJ+%q1p}7NF{Z|{6VIbM7R4!s;mdIUA>5R=10Pg`tIv;!$tv1yr*vKY`_mlK@_sv~K7W+epBc;ieOXRc z!@{g}c}o`6_&UaBIg!r$vYbf04blf1j?X1eb13?9*XPH9N%lRNFUz7DrtiQN^`iq+ zMWT_Vw^iB`JgxirHK$HTd3a-Ez-J|hJ^}C)*^&F9}7!XAcU$<6Z&7s zZzh+#Mu|Uu0gYS34g7%%Ont-d3!LoSDbwH}$+h_cC(@gug+#)_GXENZFtV6B)?Gj` zQDDY@by^c|2@#js+mbe=~&@E8OUaB`#`6u#)}M$)qyrsnnb>^ zP#7dCesu_pZPuSTq|HdoPm952t`9;56k7YwH7BPrp>Xj=wq;)gV7#AN28X;Vv*$X zHEQqEUD!$JxIbg=E0=c|>X_}B)V0GJzn=YTe;OE7ft8J$Xr43aDQdKG?d?w^)E%Go zcOu!*(%*@=Mm-|&4gJCIWSa4k&sKEkJ3}H)g~U1x{ngvatZc(dc5Q!_6Z!G}P85BK zL83%Rb#;GOK+Ean$^Og^G{9wNkxjkN+B>Za=$YTAr8)=%zfZ(Okmy8lP zG4Se6vTo+20f75}RtTD<$zrZNU(9z$qB)_IVkI?wv``nBAcyJ+DvOo0g3&@<(*!B? zV?E+II_s-qB{f58K3Xhns5a3XFuVM*6SC43#9pd)TgD-6Ll^l?KSSrpZ~7f|pA@mO z4Av}-WB}J9rzrWvbn-S_TP`})+dHXj8IJgWPmZUG)2{CVQh-6MJ4iYRwsHPk`JZIC zd8fHXy|Ium$l2G`tO70luD0ZTcgIb-$u ztwBvR*%3{eG}Q|s26k&X+1&{tClH3d?Og5u5BlqB0}i&O&KJz}z{2ME_Es0c;==D` zKHBO+vZJh@6QwvN_H!b=-qX17({pBqdZmpXq{wD2m`m8quws2p0)J>o6@ig_@dDSy z+T5MIK2zxWtRP*Kk}gW&j4k`bDH4YpiXa>cN})y5lv_Bn(X&;k`H&`+EhYq3ET*dU z)a%zCY~zIR3WSUxew+?@A>@iotW?^u@+gNrhaT^PmD@QiRiw+4#bPPz$>{S~@{1TN z{OvyG)@O0Wn=EExX}*}Fp8cb#&WA`3aP*o?&J?#~%B~CdxLCyHC_8mykFLh@d=ab8 zw>cF32}VR-sQr$41eD_7_KI!lpBHmTJ4_aaPqfSBVp_DP)F|z7x1Ks7?F+XCj+0?* zI~PALOULrH5Fba z({lSPXWoJ6e++s9+mc+C9&Ysy47TfjCsrnLSZXHy+|K$r1lcsxaZOPsyh$0!;rhc6 z=TcLY9{oC=ek}q%lymEY<=gF8ez+7%U*h^7FTv7YTpzt8Z-4zaw)%D(p4y7J<2cyP z%`e4rSwUVy{SUT!-G-+g!W=01GjYnpx#3p-jTmZ+9g|wIGJl8-dp=v(4d=DsypOSR zlMQ>eL6nW~EHcjuEMJgua+uhst9YRQ)A8Db#ZLkiCXcx!myTN*?hd#510_?#ZsS99tsm->BGFB3?HXTuS9;RdCauqFyYx4()H@5kM02#{LP!{*VV+CB zqc_vTO+wB4HiU$lM{S0JfVuF3rTv!<%4yO=5g~M|jq0Uou8TfcF0*6#_EIcA4E}wR zVZ2u_!P1vxW%!svYJ522?<=)p&we2|IR&8-ITK4+Ul7F8dWJ%11U%e^r7zsAr%p<( zSbj7Yb6@(H>SC-izq<{4P6!70b65VEJJ$mLJV0 zudCTh#+0~gYC~wEQl4(}NzUtV@C+v` 
zT53bc$I|5Zr1I+sN#$Z?p$!LhoAAVR((L(6sF`AF<)v~OKT}$wJlcs*9m4hcjX3y2 zI(!H<*!Vo33QIZ-H7Ga~n`iT4(Oy#jb&3y?k$kt2iXW9!1nhoHrTLq6xaW93D$Sw`tL8{8gP$J!eVCG z4H2t}vU6d};hb3xY)#~Xl_$(tf(L6m^yt?m(lM18G1boIa)O2V0;NPy`%+QGv6eSoDI5?uxUzuD!^Ta>tV21bcz-GoN1mLoW}} z%b)0_nO<(Emmz6z8ApGKY3vm7++QtX6}b4Gz62n)hB>6Yd0u~}Ik$Dao!K!ta?yl` zJ@y!zjh&t@4q;)-H#xH`Zrxa^j?Mp~4n6vvIy%gu=u=(fccJpDCFLnrcsr&WzWUK~ zMv^(%aX~jHU>jq?#7O_{R_tjJYFT-$h&C*FS0y&`{?5g*OrZ+jIg*alNdE=lr0TR=k>PUA1QV>4 zk+Z(g6K0|y#3AiOXIJ@6y2MA@O$;Az{UwHvXzKqVKE{qT@$ptF;Ny9?6fRFq#>bnl z|4;b%rKl@>RQ&!N_|WZukKAAkA6*0(kT3u9yXO#KemFK z;H7j75`RGNhYaP8dQ)e7#reVy^?p>WRHN%T6#YUCJ;9;qilq5+aEbMPu)?ceBMycC z_f8kfWHpsXF#vH-3HDQ@7kP5E*FR5sc@`I zCql3td$!HSxc=KzEcpl6l{0nz1HYV%1A>JKk+C>p|Mbswx0 z-b4D8n$_JeatP{ls9U?iWZqHDA?=IKWPKLk>-r>o(O_UKJH~eQL$d3Q9WKOe)wdF< zE%rg-roaIj-|diZ>_KDmr#7-EbHeq1E9(lQKX8Cil_MDK+dwh8j$(8h7sIGe%=a`d zn9uN8-$4d)X_o^bnC-C&F#9A2n4QO+1GD2Sm|e9q0kaartnvbfv`5+5!$j^Njv{v^ zX}ygg_g)^5+xa+$v{%om4;B`Uu_ftO8hC=!D_{@e*X(eCMhx61SMPAqvm9aPeSn(_ z;U8hc&Gzqb(IQVDaq!?2*2ktRV(_5tqY6a6n7{|yt`NdB^&Ni$y}=3LG4#e2=8X%; z4NyA6@wt?gTQy=IVr|tf6VWR)R`xmf{^|Pd==Z&V9lbXUt)8eG2`;De8 zRf+bXiAK{e3lg6iYBYT~FVW&Nz-angQd8$Z)>M;Jq}zijmcyISUq0Y`86tg;LDN zzzF4O6vF@FVyGooj^nsZN5Pcf6E8bz=g7n2a83v>W2r_i8-7d(2icFyNy{AeV-Wc< zJ)uKD1MrI+)|W1X^US_DA)ICQr3+!f>`NEIHjbrBA)Oo-(u>9X<3hS4`}s6|3FHX^nR`M=cT9XP4GC8_25j@DtKHAU@*ZCBFx4^@l3#%#>NBf3 zmfW0Xtun(ouD2R$GYFj@HZ<>>U$itqe#0F`(}+2V=H1tfCQnk+QlqI~Qj_0kYMPyB z+r8ardcHEz;6B1=nm*mqMCew%)MzS6YO)(mc}Yz_XRxLp{*>qg&}uX_CN;fhG`*VC zw9aUHGO4NBXnG*2={}>$m((=QXj1%%ILwD8_4Hg0Y5p^b@u^C&*hYa5|1>l4$XP-JKdHiM}uS^lGGY<)68 z5-YC5_w+)s{GD*(du0hihV4;MPC*!eyNPA`EH?5ENlbM)jpm<%?0Uo2LT`R+Z}RCYI#D5HPShjwk|)V8 zPQ%LZ-dNfPTI)9LbLuYrV`bu+$okDnrzRn)Lm}DE_nZ16iGO~J3yhGk5URJ=Qhmdj zPS7{d%lZI^w71VD#s6+f^3R}~POvSNrP>Tf0v-q-6zUwGZHD>^S;0oG7k>qLtKyit zsV~d+L@hr;o#Q0{3+#*T$HBIgz&t>U@Lr$$s@v~+FmRtxS7xhfe=yK}6WN=iFO!!A zddq#P+aH?w(7l0FcO?2$>_gvQoG=wxQazFDdetAc1siRUE*P?!X?K|ak;5nHHTnwq z3$KqpD}=&u_-^9^bKw?wdI29+NTA2ZX!6>`qIa{1+odq?CB5vDc}{} 
zD@(nvy8Xd>h0rgZv{?w>s_Qy{JUZJ?^P4fSduIq?4;c>G0JN>_f#@f6C@dJ_&e z&A$>)J&N~ykAtVZcz}*C?!=zs^S{M;$H4<08gd5c7k6?eqMzWY5$xGIe*k8A>;2st z5EBxJtn~Ocv%w~%OOwT%ZC%KZG5`4&QqJi=PtI9K`%%sI{YLh2xilTiMw0S!S~(t^ zrj_hBCsO5x8d5rRdK5D%hc6liI_D3}To*LuMjtG%Xaw)BW1C!vE{yzZE<3`Tz^f_z zIP}(??k;*0mZ-Yj9oa&oara>9V2O0pNX!)N0V3fEpY%OpBMM^9yHwpaSuFL*D`BML z<*cnf9rg}#qzM8n_OtTGW^)+@JX5-VZ1c>7*=KzSTf}C^2Vdk z!!s~)&liu6!b;X1K56nHyH9%XPbofA^hff-MNP~SyI^&a$lt|lr z(ouEeN)Gv?y;wQ~c}{)OgM{6`10HNXX)w1a41bS)K-#T z%u&-vAwX^}W8GxKR4O^s$I-M^03wRXro>*su;P=+du8%InG8p!!_l%}@SQ$MFOhco zBvn266dC+cES=K6fyf)bI2W=E_?Y*fPkMuS?R`MC`3nD!d^&WW845=}DwBL(j{5tN z0o#e@Vwv_H8$WxyOy90u)k!eBn1&EY?}*g-IwT%tLxUNV z)``?*dnM@06I~$H9#3&ddko^sn$@!_IHXJPudCQ&t$L)r~z zIu{;l8~`R~X_1GmmjH;OHQ+!V`aC(b-qearLqSD0~*{yu-`|80MvuKE)e-NPmN zr>R?dlP>sM47T+ZC65ztGRl@6UeZL9y|a^Ex>`Jh#Xpg4EJr5W=mk=-n5(|tnMrv=-hr$%H_)A2 z>W&r1t#6tSzF<5^51m;f*tp2STe3@y8dUuzkE+efI25f{d)Pa4ncujPaL+U`90?R* z$x(i!A5xEWCoOAYEg4vHj6Bj0HSoDd`ax*C_BCYREF284ZB*xzJ`ZE1uphhe0J-t5 znmU6++T+RjjT7br2s^1BegzIk7F_O*L@%jL9f0RXYxL+83l@&&Lb8&-z}2 z`cKkcP)}n|;{q`4<=|jj`obw&psa#aF6OEa{MF1ULY8x0BbKv7DmQwL&h}9s8?Ko$ zvB`#hGN4<;G>nY|dpYit;01`Kt$*rP&yQLY{dN9Om8Q8rfiy>}VKO2AHF)*|uV&!OnQROtZ@MQggo_db-UZv;IF zXkqE=8pDwVVz5mYLMsqLg}SbPL~CsUAb}Z7VEiMxD3q^E(xo-zpI|$8&iWBdcPFKZ z>eA_ySaYz9b8xsR(2MTR4|M3!=l?&_-aWpFDs3D;(jY>uAYIEkYFRJaBCO4d znnKVM44P`yf_>d8tXmb=N>f@Dq$LS-97aLWRo4qEy5bctSP)%JDAE>GTG&-CD>rWw z;wp$jOCj_7oaZ@{Ow!hOzrUA1m}cgj=iHw2oaa1ObA*V`?%)Mud#GydGD`*^V!@^? 
zxM@CgyFj8tQcPS^=YRYF^aL{QFmQIbvSop=CP7O|*u1v+I#i>TZl>(HL47D((b|3BB8 zvBBDDBxUThdVuv27kz;8`f3KViovW(i?0M}NI<;Tz$Jd@dbGxpFz^;b*Jco+&4Iz8 zp(jWMYpA@Lwz)6`a+?eC7q_|4o-q>5q;e}=cSWhIcK(8qRw{o+kL{-P~c@C7Vw-m+KoAoA1V6#^l}+r;(zgxZl*vF?ZW;kh~B(0wcH zxobhbQAi{E2cceEwH~-5mr!-0jj?((HjJv)&O35>umfeDM1xsW&9z}{CplNr-umN7 z?kJ?@{2l6+3&0)vcy_wTD2Q1K?Ej;grs?^J;LK4&GEhT}R4b4S*DkhHN)64Gl0#E7 zi4nuiB2m?PN7YbH8rk9wK|5E+NNUKCwe!aC(@>!$p^m!KlPfgb)P>-$j6;)S=YkPX z=Bl$0A**O$V|6Q{)7VHNB@%3;ga5!#3a;443EJv2@&utb=0&$M0;ko?XZW0p0U6N2 zTk%UZkOWq{Ht~_YZ<}0%unhhaYOul+;<{eEEqXybrDv-34YQLQWb61_1`x^H|0s~? zStt9;-!k{y0q76US4L83&zL_7B=+IQssGaU%j-hAz5yjT^5!HGFx*Ze_5fJT5K+y$ z3G(TW9}*ly)rmut1$M9M9%2vaVi1x&$$qKGi!yx65H9JnNLt~{lYGr>p$;+l1xK$U zgW6OewdvyFGu%$qMw6On_^*H^NlIxHd89@_HRkLR$V>bDJ3x-^wj=D;~E-|zm z_hk!TZgNHPN_+>yYK@?v{#zM9CTVQXZK_m0T`es3@RngPp214mNRJaQC+(&nD9X(8ajy z7_v*9VGCw@l}WZK#T~KXUS-%cIWkS|^vGMNywfZ1o(7?*GT5D%7I>W<{#+_=@j5$d z2YFQLh<)l&sXf-t)}lxUT?Az-TuRQ&Re8jt_#7U!0!9E1kh0IA3D+x!(u&igO2LDv1o!^4bv+FTC)wDYGg)q>tDZ!HdM%Y%&P+^a2t=P@l9WH;{5 zwRu5-#I`9pvk>keuk7;09{0*&?H2asK@6^@T^Gbq#mGEI*!M~Nl}FcgZ9-5W(XrrP z!~P;VPY~9roz9DW3itXRDj)O68`y^o=s_oyw?-c~zJ2b&*w^0#65nnyzU|dI0}Rww z$YkhIsL!J|_}C0zr{ulxcQ$L&X=q#BMG$g4=Mi|rzSj-&$S$7|OBnfGVar^<9Yb++=% zv3S4Cm9+n{5EA=G^0zvOkl1Txwtla6+d_fFR>CT`Yn2Oe`Ra5&Lv3vBE{C_>RNhPF z4M4TD3*psPDz_GgwX+u@mi-Ezo$!8VG>!qeoca7_u3EoWvbn45S4ZILrQRfvXoJaU zE{z^7%j{$S0~b8?9$PRAEoMPm(jCjD$~dpwO6AX~{3CEiPT)1>6nNE11=M-OpX-r# zQ0I}_K{T+z5$mLZ4ffb(u5u}t99}#{W`F@}s566*;@wo4L%r%$3SrCtAfOP?9vexM zDHOpJMyZtSUNn!HW2d2`{t5mORJqqdL%M&IvY?N-<)iEv9;kVMz(bzvAnrG;!7({{URSv4&@1V}jVyGEBH~+R0u%wQ*T@EBc$$4lc z!=-y&RGnk@s#6`Ij=FJ1EKk}tsUTrPImzBO2^m#n|0<=bq=8IvSi8QF?Z>v_u;y)q zl`Rfyli=M}?e~oWiG_>9S~2_M0VQW7w0gfi)KQm1+b+i(WOwPhUL4kP8U+HG&6Ej0 zK9@&AM_G|lFzt+b0mWD&-!2dcc=`mhl$_1aAz3el>ApyrS|VRnqU5~&9JJ!1$|RuV z4)}bzOUZcxzc{Ee2`KqbFs{o%K7Oi_bMJG;1`|Uc!>1aTk~0e{*{L$gZV+TDUvB4> zrkE-<@=8>>in8r@jgoUV*7i|l5+m47DqrrS@=b+GPBwg*!`^1&RMY58kRmdw9KRp; z$ZqRu5@1`|(a3b1)QWFVA6&>S(DC4$qScFcR4yZktZ 
z@Uwh3?mH@%LA0hBRGHzR>b(x?Y_0pXl0O1~)}2D-sSY!JZPy0RM^~5`cBu4tu@qJcce`KQEzNSiufo`6S~V zTu^+k2V?$^gEHTGst`!DEot70{*%7gPj%kcpR`RcThAsabSm<6@{z)z+f8sM{Tyfc_0e#3n3ko-waK+_P^2KNV zJ6!qp>49+Ni+lUv$|ry;Yo1QWmACFq!Iejz?hjYgdri2~c&`apYVYN^0*DgUX7Z1x z!>e9KrF`&eZ}cdjK#y16t6g$0M*-;VZ{gQ5uiU9g_X5Uv<*m_Ijp9FetNLIQA`GNV@3nZ$Y9FQM)vX4BJa+kmYv!$W-=!rOM=R$K}B#E{2zE#%~ z*JN!^2FCfgGTkufQC6(Y0{w#VbT_5p&OH!~f(fBq4=|G{;9Qv1JOrOV^A z`yWU~FLRjWadWThE00s%QYY$jPt5{-F2<_hdNP$i6A@jgjjCmZ!HgiGioBFFg;ijW zKvnDRE7A>eR$6o(V}wo2luTXM_MMFD+Lqq9-WMuWfS)w%}%#1!TU*s_H67FFz zzd8}uwXMhdhv}(i0A1Q|5#Y<0rvtn`Q#VCB>K1fedxQbJe;|M$F{{@0^Z>xRJ^-y8 zAckXlkpY~<0G>Y(zy`|IzuE%`^U#!Zm~Uq2sWFax3~a17u4|w53^181_+&b?3lVHr zAFwkrbgqhFFi#L&*Z#s_E*t=+rw~;*2fA^~_5nDU1H|C&uP}g@Gl0bd0W31q{Mv&E z^BZ?M%y$Gm)j_G8f&JllT-SDW4*<5r2so!*gkW##1NJ7tB=cinPwH`9dz^s{3lt;sj=^;G3sYPxW6T^3I;cH~VCB+uu#tE? zHF8lEgWP*8u4~`?JOJeP&5&IP+1&^7pVA=@VvzsFATLRQ?6Ogn76t`zZR2_Yb zegeqdu5`#b>5yM#RPPD~c^p9I$6nLMH^A27lB)y~ZSNkCe>g?xD^CE$zxlXah9@Z! zyU@0}I~WglIt@g+sl0{C!%XWbKY`XMIk_inh$>Xg>3(}CE-}wBTFw~Q>>=!rB8CHO z7JPsWgQ?t%jU~KOAj>mE@`d=}jU<`ZFqiigoCeFK0yGLjtvAlj_X|wLhb4;_Z;O`b z#%@a1mlz{BWSW3kJo}rk;2@Z<1~y+cY`$tt`ZI1*19oCYP*Zk{@lg|-~aLmqGWFo*YD3115biHe+>{NvE_r02qCVZ?jID`e=N{p zVfWK2I&*=IhID@p)L3Ybe8IK)gvUmh>0&b@hF->C zJ*<~8s)U0hge0Rz|b#P$0Gap5!zsOd7qfRlsu zQN>r(w(vV6#e?E2qUw|FJoF9^9xLymGQ3mr&R?2GLfgdPj!Z%VJ*8scVGwe$zX}1EOH7*)R2INLCFs(xkpVcRPqFE| z7&1Q!o|T2JbQXIYFNRE4Ior~A9f|AOjKe9tUxx7qw~4`DK`3$1%a6FkRSIH?>heN^}v88dW08wQjmyXyH4u!>sf}n=o zPioIqtbqsfh#r=$_dS?LsFJhe(-DN8o3sDZ5#-#QJ08p<&h~kyYoqLhv{@vG8+z3| zbfc`HfsGBVns^BLgiB3hhft8eW9Qi2hvK>s9VawjU_e|~dWjZg_xn|t-o$i75C1o! 
zTQ!nUs&LBDh_ul#W=-T&PSkZv4(;oI5~5h&RxuX>663@aqYgYdk^WO@sKY-DuMLCtF8|KRR1GWBdZYLxbGWrQw6y^4amH|PuwUyt)Twk&z{l>SMVV3oW zzDO1AI;raka2w4()}GXLtqdU^ccvdo4aefTnaO*cZMwG^x;^uKT#q)L&~+{MtGLc% z4hMQldDBSSqC>cII${Z%j-x}7#PAx?zxJ9-7;o!S-ZCT=7mB^Q9zCO1*X74FVkUF) zA~Z&XkT5Pgl^5BCGz4nI&A$jlsm6+85E#fe(rs=o(fiut>@u3)`uqs;u7$n06kZ^E z!xl5;_%w1wp$ejZ)x6UL{OXJm2#f-rq>ZVR3O_*ZWu8<(m5CR5Rku`F%GjgJfY!{D zY}yZ-;usW8&0F)_2ogORH~0}Xf8}!{h+=6K*Y6utn^hbJakbboVThbZT*0IM(c=1j znRBuOJvn~%b-dyF7X1yD`>Gkr)$NFBw4W${VoT13W?X|{T-Sr)XnkDQH(6IV=Mf<~ zgAs&>JK~rz$jr}tNYeQ~zQhjjrBu8U4RzGHI6Y^Z!jVc=wcdImH4`7JBR6LYDO_AK zW?;GG2xhEvO%h33WKBA=6Q^8(U`v(gqEwV<8qBiG%}qDf3nUiMrcV|Kux@?ve{!m9 ztj!{UbL(G*Cv6l=QZnOnLl!aHiS$e|#S^7X-yxNFmD^$Clh0a>IE))E4MRBs51}e! zWZZRpi`!B|h?ERXgm@G~lP%`VhM}+++>nX2D%i-2NV2sO_8&zGUgo3fQ#?5EIO;qi z2I`?2#B~}>m0{GmU0k>eR>fnZ&Mji#69KguHH2rBZ$MCkL63OuFvdBcu@1gf%cUC5 zq`;zDqdPeiR@td=t(2EH=^&?em=5J)-W`K_*-`fz*@A==O5%&uIC zC7WTR%5Y?-Avx6l5hI@BLr6i2$NwLY0@t`d%!uwc@jF!Lgz*zy*Q00YDEfv;xa4|1 z3GNG0CgG1(LbOB0x*l!83???V-vaYa=z4KS^nsJQZr=Y@G!Zh}Emdp zVv)2v7U+691^{=93<_J##|QOAlJwvG@(1Fg`KX_yj7GW`jWl7aY)Eld9f<1*%fcdV z7Fwt@YtZYgx=B=N2IeZzlTl}<3dZwq>LGSQ=8P0unj>IF%~JyY{-d}aWe2TR^bwa8 z#dZgJY+~pcKt2PMAO-^jf%@la%Qy0}3Pk(|W?~aD*ot=m!!0hV=3M+&yeHb&717Rd z4-!LJtC|eM3pc*BX(ul{$z`c%E@z|wyKWnBS+BRo^;kf=s5P!b)QmvSP%#9iAiSHQ zj=Brd20T15VE<%o+QzsZo2XrG^gecgjm-ZsjA~QtpW2B?9NmwzXPDS(h@fDPZ(1GMlbY~kuY zEj*agLJ@0W%U5w-YrzVW`n2$$9ejA(%{e80!#-_;PugEE7D)8ggoZ0EppIKAU@iT9 z5XL=Vt6e*rsyP!DX8_R3 z$``Ss=d%CKt`ib>)+g^2%%}g2;kY?UUfTXSRY><&WjFXh_vvErAV<@MA0<-5*;&)O ztT2`m*}?+~0>Z6%pY1Vg@BNTFG>CMc;bkaXHw>i*3gPmax*K_Y?b1I(zqL$4v<$)C$ z0A{~{s(GI;X0xBW0f>87bd0W>0BB=BgoWMU6iD=wbPt+T{!HobS~O3IbR>^le<%HY1F$s!U(OiABKvY`)0cpVSV((K+}W zWR+keW@jZ?aZ!8yH@ex!#@ND1kBa#xbUn7igiopWQ?^gwKzJl^<9r-$oH8U+ zw!Y1OLI0yQdW=r7e#xQPXA2~@BW+}9KgIP2GQ~hWW1QH3zg$?0uaBpNAnnKDB}k~X zkuytn<2sm__>0#f^+Mm5wECudYhl{GH8Ztd|LrI0TTe6$CutBOE&V{Cf$s`g{Q_3M zAaUt3w{WlxOWrYnbwe3us_t 
zVXOA;yQWD@-T!Bp!`FiUgLWTq`bG@c{P6;bZ86P%+WA&0r299f@Q3|`qDCmUH*x@J4xe1(nOHlCRnAp{&~U+d<{B zn&fYOcPMM1%9Y6MV}Q0{$=__>2%k~;Zd>AA(+0dWl-U!n z3F?nth3U?{hN}6OE(MwbDJ@a-7riyFPzZ4&TD;7xfyEFzN zm6=MJ46&t|O=z&A?oz5yA60!7!3?VSs;E*?LFJKDF0)a^SB)#^tKqBHKxGUMni3!0 zZbDiILtXjVJB(INTjr-k3#E|~h3h93+1 z*#C(wc@+(LB)Cls77GxctRT=+B?cIwz#tB4{)G*Be*2@@4;NkV*IVC&m*RYXpvxt{|m$OHZv`jJUe!rMlzWD z>hlPsHN{$WSpX%rf~wZSdY*ylp`w#IRrAa1^GNJvc7u8$cG*C{Q{I~x`m~=^wN9u= z=VUJW7!b5-4Lo`-5D4Vt?T0?@?X!QmL*XzQI6kJfhN{-7uMHLwOK#3iplx|SWW)2O_MMtU!! zk=UhPHTyzOq$k6pjxMI5-S^w5vv=NJHjfX@hT^?6a9j{W&J1=1*GVHGV!(<&x-$t8 z7rzIne?u0H9L%Im$1&B*0mx`G;XjWJCt^$PI2t%USqyxFFV{UjoQP|4ucsd*CewM$ zCH|$&Bk!ChZ}7<5J5wW$lXCxm-O<9 z<|P$da=xAek&Nys?ucE6bOY4Xtd=@jK*xR%h^pg9qBd$}*-35zr`XUNM(9HB65^CN zyXLH=GM53O$st!Up~9M*kR*fY%BilZ5D>^K*|qm>K;bBjy^#>gyHXNHV&khV=|k4MMiJN*oX9|y=jU5}m$(U6YY#83Nt&jo0-5Q zV7d{L>hGj#eHM*;pGlkIh(!-V{Ig45T+aG2{!$ukVv0I{v(GL65Pj%`u6tw?Y4gbcWOaY?$_G61 zM_zfyG&vSqMVs1izJAD}O*+okP6C}-{I`@Yw&L5a<-Cj03N{<+vux5l&R%gLWfRua zh7g)tz9(KJ!nAiRT2y^%~c)W{`xA5@_KAy+N)30#?KPw66-RlfEcOe`{ zp+KT%9tQ$n9FCnyM*7CwGUY>xqpJy8S?7kBg4nO~>q_vahS0@J&Tc9v^2a2EaQ>Nw-6Z(yZl<9Q|1{9G(L!-8t7=V*SrccA!r*X!o+s`q5g1Et_8J{}O zGJOKw>4^=1QfIvaj}Oig2+-Vt?w2*b3i$h z2X`&Y7O{oSYV64nLs7zSN_>{YAcSa^yQor8Naaz+RH|kAt+KC5$?LogLbTsX{Jjw-CbEG39Oh!klb^JXBa!oBa-+_25J87WT zQhOnU>{fHmVJ)b+BausBP5kj@qY0&4DvDmD>$=wU63W+JGM4Nn)8vm~lfM9z3ECKQ0{r&sU!T;A!^IsjH`{e3%@#?kJb}VH8{7AItF}`a zozeU53Z+uAQ(+gnLgNJpOgS$hE<)zK%;=KQ)|Y4Ua2r8lUWwX5SOsipxvc^i%5ny0 zxvh%FlHs?@H&?;1kB=oJn%AE^)FCb+Lb|>S7!7Q*LTu@xf#VjQ2rzQMf4Dm17`??FH16v8FJCUM%t=9 z^dijAjzU7BU-r!h;Sd+SOVafjRJDfx#G8D!fZ;>zN~#RyCb{D!7}MD&c!K7HYTGd5 z*h53R>$cLi;4s!PJQ;$+s`%e}c%r5D9HWlP%T0%M(Jx6}YUwa}gj!k7q|qR4iEuzUDZV#^k!Ve5>; zQCWirHtoXu3{7$<><1A) zMs@9Xv8ZZ&^jGOeXXHK9FRg^fmn%7$f69DlL!gt#-{YSiyCPk3G2i&Kbw>K(a0`Op zEfEs^CJ|IIZGZfjk$#^y?GFe)y_p;4?4e_~Q}sTbhI+-&3b=wy9L4VS3me#Onb12c zkA#kjp|3zS-i!MMqO!|7i;u>v`X<^HV;|U_`O($vm(DAUpRu+&H?wl&V(e zOulg(qm8RCC#+`x5&GLPK!f2%v1R#4BaoLy{dOo^_l`?!*#rTF18oNKDeq?j-s9z` 
z_FO7|#@^wk`8}1_;GfEJEXJ6DCGcnL89Ws*Mm5~5M@wulHf(s9+?t4T31S)t@X>O^HA*)kF%@xbdS8r zBg79)6W<&!m5R$ZO$iX6*E|#dYngNL<&pVn?d4Oz+5`7C@ulJ_C=(@ey$dB>L)r`Am}j z%V*mL^j`+Of+0sp_@g=CcGC-%SazyZ*pxZ;K>QRj*a@%3QRiVl#MwsL#O1GYIi_xe zpq7NHH5ld?kG@LDmM}&Kf7UZ^02Zh)(5=^H6o)Za5x0R<%1Y?DWpF26d<)R3G8d#q z9J`AKj@!kM3(Ugv22aceMa6s|o+GHTh6lsGlsZ4JA3>G-ZPag?N$VCV?4UwSAsWQo?4ClTrNy$m63lHX`IXxQh8~Pb-^a)+xgrT#A zbZU*z8{6s0iT6W6WLLK6RuVr9MHA=ZOKD13-a<4#$bF-+<5zvMY zgqnfryf)+P`$&9@iNy(uUI=@I|ImQCmP;;^c_*8brHMHisM`>{b4W_JJ1j=GKeVKE zdr2mCJD6$e_Qw^e-EQXHhHX2OV`gMuW(sbwD@?AGVsdJ>c(fx8g+SYeazL(>3bX~7 z6)}Nm(Um6$UakLP6%C&hMiWL{O?WuVnT8D}H@>Rox65*=Lc8-l*x?^arX3Ce4VG_= zW7lSefhcUmQ|b21bc?q;_%Vj=Ku=>azuDsn627>3v1)^K@gRQZZG;LdPBB%0#0w)f znn~A$2o%c}(n;5Mt#MsD2S@RnQKqj9hqasDjB|s(erD+KGy_pkr<9zY<$P&pxOk@R zYbaRjYL~8uhAwxQ8WLGUe{Ai4c+*;;!p*tqXY}}1!y$iZ31?rePsHGXTJfo_|bRa`z~`b954I+{)V`&9b;dUbRBu0s3@2p; zmyDE4)!Osx0QS)_>4>}~l8VT^c&7c0qz~@7h#8UpiN#)-lz#Nx7wLbcXX0#pJ~SPX zr$)?(Jo^JEHaR{d4Ux}e2kuQciknYMpP@%z|AiSUywb!-RGX*AK;&ymK!B>bH3rFII=+Qlb_q~YtEqphwYjl2bcA zn*=-j=TMbaL#^t5DqLfSTMn&mP4x&%*UoiNAx@n?)(@f1Piw8zxu^a!D({JPQOt9awjRZO z?6nZflxAoU*hWa*8e>yMsZv%!<()Lt;U7zt=@rzu%l|Df-N8(%PDW>zm$G^ISy0)= z+HgxG@*VBsPHsM`G&``7x<8=c#=(U}a*6O?v{_(k^ItiJHd*mX1*}bO=3v-g`D|zk zId&I#a~c8gJJCgKY=p}v8OHULpgP$Rt=7$f+>&O9r{xGF`c|5KmKu~g+0cMRKOIxX zydiKFUEnOb!1;1`(i%`X{S%mQYGb^4Xqy;(3HZY$W+t41HoS%rq_BV<`q##EJ(XyI z0Z|oI(PTorAxzZ;``E%BVhejnd!m?)>rTSCDIV!PtGHgY|F0{|UsYNF z#Hk>Wo7}m%el+_5!*X`jib;1JMyU!|hgKj0`bVUENO9sz5#?FS%{)9x|5oEzpH zWNi<0o~S{oJF(vmXJbk|zTX=8eq+xxfqj{S%GA;=cwrf zn~M!%=({0=l!`4&KVtXbZDMFX#Mvmbu`L8Sq13zWY}a8}h{Sf&65EXy(Y8|Vw+(+6 znf|t!{>I3}3bh*nm}pbap5wY+cOF%IMaq*zfE-W3k1VVhp;m^D9@%C0s4irWrVYaF zgt@6~K)}xj_f%gARcCagNse=ixbTk|Sc3f>hY_VTDTXEr_!8|7Jch0#*aC%@K?|X6 zb*CsYbgM%QHd#3my~{W1YzfxBpIULOUwyCYWg5*|nc4d0$K&P9O{V_nBrQZ_Jr30djd7$dtP}8ih6W=xGD9 zrSz)I3?dLl1UV0e!gom1#%3fG5YvH`3uXibB(x9I3@3@uMF`OM&y%{?Vhv2qBf)T8 zt^s>fBU=z5dUsq;jWt9@#P!(D^rOs{ILh|%x7F+r!t81gU}X)@2jyo6NP~-!EuU9$ 
zRY4LiuR?uZ8MC8l-@gLv@B62l$V?vlkoQhpPjHsn+P9(D_uVG88sdn2zb2j005OW#tyt`7TlziV={2T%fOZ)cTlu328psvrt})-099V32pJG?@V#wnh zehZ5IWJxEtg=@^$%FS5pfUqwF>2+>Mkf(hDhmhFJ1 zt(wiTVvSrr9A~n5H57bXH_ap@zP{|OxUSucV2CwcB=Y97{M21&4lwp>_USR>)7atZ z_xtd(K$1~pQ1S-i(D&(> zao3x1U3+xV((B_A0lOc;#7)%H3Y&m1i?5H^=9d*C3U%y$IcnoSIJM ze)~o|B@7`=XtK3job$uqrArceSL5tq5UQtNhho<6)2YBeSDR_dTrBq5k#qw3+Um4P z_F~BwzDr;0z-lw~ISY$bW~LL+%GG8%8-ERo-SVIG?oC*2rn8@8v2*(rJ7u+*&c5+l z-+cx6`t9pxIxAzb|9qDYE&RH9PHw?sEq#hT@w%CSUW~=w`aYe2&UrnJfR4dA*%M2j zI{JEC*8(qKab(95YB7VT;qr5uXb8|t_z!z>+qxx(wn_$j#^--$ntbr}y6dlq<3{KT z$j~FO9f`h#Wj@C9u3|;~&qHTjePI0cwYW)Ab;+xUhOzWX^1l{0g*dQC+L<^{L(C`! zan7c`mPU4Z5atH+jDk~Y0WbN6QF1Gm{In;XkRE*%OD3WwyzpvV*NXlg*R|81H_tOI z3KHYEp{C=;mwMcsI4gPF_&DEOVOkYxUX_}}s(|8bR5Pr`3Uh!r2)wob2~LcTn5EQU zhPeqoX%C$sBzo}20rS6!i)IS_%G)@(-)saY8&p?}w}m=$<~_w{OJ-Qe+@tH_4PmG* z4kNM8jqAnN5ESG4&@p5_)Z>T~XT9!?@u&WmKstp<|EGrq)C30FE5Ypw8{JMc}X%+U7U+SGFX5CWv;+ z_quK#%zycpYW?{1blQKz<9sFDX46*C%S?n%nzfgZXk)xjeriSNPK=sBSm zhhuZ8e27iT?Nlie*EUq$8q({&qi7xCX9RsTXEyduFlcwukaTx@s%1y)D#Nlv_)T0# zk)fLR^ZXIq7g+I2C9ww$o9y{#q1-KDl>PC1u!_z=0|dpA5xoU2+t%1+ywkTIKf!*l zY~mIj>$_nPRXlbOx7?6uP_D6+B zy+;63?Q+RS$8PY*UG&&4a6TQ==~c?5Y7fIf#m`RecCWk{1jAlFd0yw%`7=P(#s*UX zsu^IlrYZJmN`;N_R3%>Lj{6~sP&L~>Ud-QLDcQAAXQFtQXTou|i;JpsU5_f@DbgC- zVr0`!F#8H$)259}L1FCNQd{Z@m!q)!lBZNeKdY|(#MEci1g~I+n;QkEs6f7vJpU$`fx%C4` z$%ux=iNs4_b0GW82nh^Fnblx8P{~=zwL{>{AA1+T$Nr^_x(Bw;WY)u*^z;CkY5iSC z(*27lw=Kp1a3~&UeZB)Godb`;+O2&zBPonQOD|MJ?1LRWVcHoyFY-A zt-*zm!6EOd1a;#1CfR~?anB07c~#1#>gZ*(rN66EZh_X0I#x}a)lCUT_bpD<}5Yp9{N2dmdY+@nT#P9M&y?3Wsf^(n>S zST$RNYRF&e9UEcH|FNAk(sP-2OoR%D*%W%mZs5yPDcRj}rR31E6FpWNgal@}ys8Ca z<thn+=R_Si$**FD&yb^H+XYw=eK9=m$cgrSZP@z-vjr{zHtu-3dyB^{A+5{Q#c~$7DZd7_RWn zZ1Xs~{nigp>U`dDGQ5&qo+mjpvLq)bO0H;;3Nw^3XPm1gG7_oJtjPuNya} z-?IVN2Cl!F4F^^HJt5Ka^nv}s>~rtz_pB+QhRQN=@)Y%RT-<%q$T=@_Zmfvk9pJF! 
zG|?2SKzsH-gg_Suq+eGH>E{XW6QxN#KHJyaA?F}_aS1vtgE~s3S&*Q&sRx~wA>fLd z_w)6`iP(}oj0Sopi-809GF&m7h-EH@3%IJUsn zezYC7EatYhX*=_TB(sfRV((M68NHXIS+uFAhY(Urshao6tYklHFQRJRXx_)@4M0}(Y~we zI%@}Vg)hED04vZ8957#tBc`=@3uf4t8iN6}LTdhP*MS%RDD>h7Cww)3{0-p6e=Yie z^$j;*=}lh?b_mz~ni`QYC^A2hXa*->$rIL%7aK~|@sCq^d~#(j%QX&ausV60s!P*C z53ohaFPWL_Fk6)T#n)qJqgTNKEX76Hz!qf#k72OX9CF|swkR1juo}I0@ryw4V7eId zjm1#no`Zx$%lluiS4jH#P7`v^NTx|ZGn*!Svj!(%#V) zs4fPKtdvT`mhnR&kGorJ$#R!$s;j2*XLRglkKCEKif*RDE{`)jKd*ROaGQT^Vz=_J zK)CI5F_<@w^@~9Q5nIYp%*KTo$T;{J>7OUj*gI7IENNMUX|~Ogh7!g`+*k2sjSz6q z>4*N7_Z>TWniVLVViXVu?mb>@l2g%3ajNphdug!zQ*2Ydn6w z82XC!Q*Wp826}7)cd-4pqmuc;v&h>l>HYqEA)lUEN8Y$7;jW2p6`=EftW~U;Y{F@Q?Ek zqwBbtdn0vjsvjQg@MqFvdmtbkD<<>-0%0P+0>`ZbzfLj_BEZ*-T$Q zPW_j*KnvUq;Xd$|1Ilwgq?BeFI!XGO|^xw(<_P;nL&LB@{)^>xMsA!Zef0 z3AHEsjR3!FbgbX`7@L-RP_=fL)qCeLDt2%o?EJUCCL|U~t6wxgzJ8kY<$2|llyI!G z8JP7c>thN%-&`ONqAZmJLgc0FZ+S~(|FGhn^7i7*k!V(Nm%J+yvlnlXA7@2W%P7`F zYya!5x+M+y^(r*}dz|y@OF6&3H0htSk4M}-#GmYBV^7p*!>v0R3Z{p2${=5G4$3Ii&uDUaoGF#K}KhC+?U*{0l zmSx}Cbo`IA>T<*u&nb;Pmx{rOdeT}%Y+;Zu$IxC0Ll?C1K!#pk!u7*2iX~tz(j1fu z1Z2%&ikmh*j{Y31La0N0HXM5k3@XvD#>*vz?67K}N@l6pLMxiRk{xe>40k>h_#Km~ zRHyQ0ej%ZO{cwG2pu$cz$taeoEd)7i4&*Su++G^!wv~#@!|vwE73_xSo2F9Tt6W<( zEwoL1a4$gxFz~fCvEMKTPg{|R3yRTlCRcGCq~euo0D>2j3twABL)#X7Lfa+-KjjJZ z2;#z@p-woSLVayiX!ocz?iO1zrv$^8v0HAX@~2epqVkW?qYx*EFSj~j!H8`0Y8MXW zS}fVnO7dAZ4BbKB-sfW7K;x5ZUGkMa zs!T4T%9Vu)<&Pn`)o1}pcyRe=DLLyH0Jzwz_m+e@>ZVe8QVCTi6;fqt5!mp{m8M|k zh9?*!$&m6~yW$M=?SuJ|OUskYu{JwXAhEZVobt3bhEbj&y=_uCJ6&U2io@Dvtnl|# z?$jn^3Iw8*P<4_{C(AH7GUchP`Pcl6ath@~cP>ReW+1@8Bf8kUvZKlXCU29b6uV1-FU8BVdYI z=Ag>mcB)PXEAR(jAV`@TQz>#V%g{8J6?m0Whgb2~J%MgPT=-1}As)TmE0;Q`zM0Bp z1#ZRq4=Ilr>}=qG%cEL`O$mkTN+ExKkxUa8E;R{R+@*)NS(=4Dvqc~X&L$&lUB zd2CyF+4higQMm+4nmmU|wBexYJjq4X30I3LB&1=G(YNJza&lBonoM&k0#MaSlqyqQ zCfUf$Nj@&MnpDW+Z8ju!g|QhJ0|cD?2nR_TdKDy^l&3O`cPTm7o(azOlc+q&rMRW> z@+6<)md=wWl_zJH-2sx_f7jao)M^eQkl1^h*w@lRE*vQR^8H4+^joLKMTE8F3fyvsliLl7L~Y$N0QRVpAUcJ34(+7GY&sFtbV<7FQs`vy^U 
z;vmI(RzV&Kh3n>1`4GdHRjC;BY=$uao~|WyR1B?*@PlxeeNaw-Io;5>1i8=5R$+qNSRv5MO}%@({FbW5_|2J_^V)__%Bqo+G=fR zLhptAQU%a%fF%eoa*S%y*kpMXbbTb;@k5KV~f%V4I@U1pf2 zL(mfK@UKql$hwE5dn+M=Wv}DmW@X zu!6zK`E5SqPS&r&z)QJQUbrfWuwBuw7z=XxaYAC>QDq+6k?mSG`}rW_^5u3-?_uq9 zMu!3VS;>tyj=cUv-2HON;f07PfmJ%Uk~Mfr&|bNye1l!ddto$$vOAT^S&)-=6P3Gx zyNmvXb>gueLSo;bSyoB$wpb?BH&Ey1`BzY7^ygyB+sy)jfZrMobx@JCi-uT2cy1nO;C0N$_abiMd!EYmCRq0T*` z+R1o9`4EkKWAQ54r%qFSg2%a844i2tWSYF)6FFeHN|A<7bMBw}F<)h`;yu;oadwD- zV-`X@LeEsCe7KFWT6-;Mu-t(5%xF{Nm#V0m{dOfCee#>$&?5v^A&&5DB5A<9$O|jV6CAiw->=FZI5P*VTmt{Yy4lD2! zRxAQ*!>9fG9xykHo^KK>O<;GJ%z)X(_l2got{u8Bp5m}V(V&=h^=7RJ^#F9J84PZfr40g7 z`-bA(v0r;R7voVWDD-PQYK4#-1Y^0J+eLL9$kPPFACRhd+Zf-qBb8qndYk~i;^k(_ ztT?h}@1xXCU6WpAM^Ca!JHer1W6OaHo2FFQrYU5ab64FJjI`Gn_DF0y!9B?fwj#5? zH|2snhs)I17-DwDDs&^OK+w&r8A7Ec42v2eH8fs(jO2FZ>^}4qXM>PC)tcu5UcPDe z^MF_ms~7Y|!UkN>GY#bzzTxYbpqVx}L{^pBN>s=^c`0xw{Zcyn>*f$H&w7cnlt5%OnfMfa_DO zz{N633`%;j<(e!uSw-3%bmOJ25Y=Ygg18cE7Oy*{fiET-WZxwhxl@h50)O^R>G^B_ukdXFz>9$kY3a#h`ti zI0g-tkcT`N{b?V0x(VKG{izQ^VgF?SwCd-S`-^eK)>dT^^L@4-WkSNRAgT9wzIZM9-+!dS~>SGWjgpv&@pJs5-JdO=leIz5Ju1_ zpz?Nb2JfPggV|KgeV9fLWYA0>rA_;Fkx`|nO&Lc6J$4L~F1C1wScJnExT2=Od3cV6 zs#odqR-m=53^ds6PW(M}_Rg`yD)bKiZAJ3iY1FxQt|in_e<0N1uc3;ADh)QdgE|k_ zmSY%C?+_bx_S9Yuu{`?n_ij0&-Xl=wvD$M>@hQ zPhiMJAg9HGA12R^ z8`>=fPrrK`;?f!x!pR!W6l`6)QZ zrj70Nj{V#tpPyqN}VF(Rzmla{^Zs1mBt{(ct_((3GVADmAbOx{4LC)`^N=!VqN$N#Ir?80_& zJrr9KF%=uaip^u1&woyZO)1 zf6w}KzpQ39u$gX#q83S8kB8Pn;q(c)x!yc)nRDa1_DoG&*JK7a(bstiZD&&dSwWRw zQRf!J@KIcjKJ1B@7v`N(&bm=96=)ee7c$s*VtQqC`r?o2tjgyzfr8d};n8y?JZ^zsRLR8I|8h1= z>oZ5vD-W4#p4M0WjGwqGu507&gbK6TO%=e8_%>{9+$KVz|8x8o{Ryi3;~zp{f0p91 zg~D}Xi_ro2#l0pAB3tSK&)u$OhPj-g%tn>L?6|g4p*?S|A>&-!u0lQ2WWp1 z@J|Vt!!U<=d+kIX_(#j@(<5$nnCKBd|4e<<8mB^6Mt(#{)Zc%8BcK!g_vsqshPbPn zQc(gH2!7Vjqtv;lwoI|S!TRVi`gr%ARCWotM6bDzRJD$JbBHNSjFEMYcX+D@I$XVx zkm$8~pZM)|anWp{e|s=?z>a1#ojQz&^js0o6>mU)9SId7+c`DOl~SR0ZnqAbom*$K z-Kms~mLbqw_WuT?T&igCQt|o5{(K8K#rGvBK9Dl|Cb#4ewDaO7F<$}Z 
zh&MD`c$rJ6YH!{^NbI4HKOT5Li;JkxH-52Os!?dJ84Viv3NB9s_HZOs8I@82q*l|$ z9K}U^i5&uU!xG7*jCj8=`37N1g4Lc#1}NefWT!^7!Wy=OAj_S3O)iC)k_Z0;bnJ^Z z6CHyPc*E{CNlj?NB2MdJhssa4ndLZ-F@m`Nwp2Ne*2vy&xy^iOyOX_IcN^U7CeoSh zAW7H$9M`oYx5agB&!6y1RMd4{I}|1)`q%g``C9}@x_tuca40-~c$-_QF^mHZJ-An* zN?C!kbN&IUmPw4hsqTdqg%~>vok$F0zbZWO3>JZ}it-b#N~#-)=4O5ULm&5-^!~oz zhrTgN8w$@KK@9^n6M0g+QlU~NQFXfH?417vomo~u)$&IyRNgMG|1zU?wzz(O#+(AG zPPM>bawEYEs#qfFhXY7DGH!)+zW9J?oxwYI>|Jrb1=tGhW{YuabzIlVZiU5wqum~R zlFD7NC$!2B2?2ZB{>S&6(4RjN`1fFLMFpe)!$RBYav0644(=9%7YN`1orP98?*S2Z zwu+0d5HOw^WZoUShsww3u{|^rzl@GKLe=pj#q|e>7+6mT={k698?=!F>X7V_;`;qr zbN(j@>U)AB(F#wd%_GB!P?b4M9!AIRrE(9yPS7!XshWE!3US8npk~jxO-Phd683Tj z%0eppC6`<-NnqTJqVRc=U4si(CFn6m!!#(YjA6-I2%f=R2hoZ=;em`sFl@nYHmaBN z>M(iH{boI$p?2lnGkd!&oD2kWC_eo7I5`RBGygFBEzAjPIq@!+9K{{-7I{x>ka~BP zTaGYNf1cPf(dJf5g;KdE@a+Qm@b+&9_m&=wG}i5Q%WkPqu9VJ8rn!9#n(2cdb7%HR zH(wYE&l`udcbKzt{y9pSMJ|_$0voa-$1}y%8*q~fAV(>o<}{LD|7=2J2%T_fIt=dJ z_X&wD?jJut=odd-hA;k|0x^W|*^*Z9-U%*T0;U5})66KM%A7(P>X`2Uo$^!fpU^kC zC2E>^uR2-xIwK4AQs+)FFa_kW*`HE%;-?;Gq^`r`Z1-R6aqbWUXM*Wl&h(5t#z^n! zo)YRX)Nz(iSuV!&>7WLZY)7WuMFb8io0)%~Pj+;({f-9v@fZgW{tU zAG_8Q5fAHD2)5yLos^;yk9zm#TfwNh=;N26P zg@MGsu6h5`?tnQ&3r9zBN9t%JlpkhHpNhtOL zu0q*WF0O(XoETIpG;VpXrS#czXnTNSx1LoQZon% zO`bHX!++fK8J0b16nE1Kap1V;Bf38gw0p9!yuga(d#u3Q0D|!NWHpDaa!n&(PiIRd+)V! 
zW^Keu3I{0^s_djYYb$$<`UKev>*Bl3nz+Ul0t)bl>2xVw5Qwq0jNJHuM<3TPn4pMSKqHwApVH+`y<*&M!{ z5aN~11oGNEF(~q#d&66W%-P>(Lfp%~9RLRurmK-|pFW5)H z@0JdhN&f-2G`F;?O!{Z|&iK#S@DmEmB!rYn|0Smp89h0&Ms!dIViTX(=B|m`7iwS0HMizZPy^=4|+bviusZ*9#?fL* z*8812o6VDN=}c4#bjt2a-$E!n@J#;TPox;XQ!MZ!EH~!m&}FQSUZA&ucF$bKd=>NL zPClSzZ^qgW;a0wBG_UAnV=u%~)*>jHRZB05IaB-a%k|7+A_mqO@Wv9^tsBJM>k$gO zNJ@KFmM@dp?<}SLUqJi6Ks}kputPJkwF*tcpFc4OQAxLyp8{7(ZgCv5rlxp`b@{JEc zDq1EwoQ)LL5CnI`yc284=_v%~nH!{L z0K@S*Khn0hEo;>qb3s^um8EPOCOfQHi}&v9949=K&G!`56g&E1UohHe0P9hcsE$ly zD82yuwk>UDpi!nnyz?r$snukbR;OsKLp-7c+q^o(0+q>JTr*G;AKm$8gu=h$XIV8d z36x@R>d&22aLWHKr3eyn9CopuQRCu{DVW^7v=g)P*F+@4peJyal)0e~LXLfn;b%erW zQrl|-evdFD@2?Z`lVL1N%;pgss6&GPMMkNRN-hvwk%mbdy;`v>!B~7<1}{8>Nwc>J ztKI?;HuuY)1|cmqb$L7j0+S;zH-Qcb!Z;)ZZ^+aconTGLV~w3mfTO7f>m-fC1694x#Y2)c&GtdhoY|aLS~fSzNL( zY4zGM(nyFL<`xSe9i|x=VdBxF#s`1?(Z<-B=T$u)Zgq-q@@)d`)J*U(NYvxxLK{oy-l@D3818o|G*-DAP64(n$pnc{`T ze!_~mOzY=eubn25b!zTw2t|Co?#~i*c;@P3gBUCDuyEAz6E+R~>}|p$!A7Sz!XFd@ z<2X;ilqW!L-&O-#7qaF-Og zGjQufmZIph_Y-+<5%67kx|P$m;M>9ZNnwjWo3hSQ$~!6E0=mr!GM@4==QYWRwcbPQ zG;#!s7tAT3Qbz%b`HG`_MazEmG>PoPttJs616}K?jk!F$dA|%@vc!p4+6q}av7GfA z_&6IJj<90>{s^z@+?P*7=;#p;rXL*3!cS6BYx5;hc6gnuKajIgU@TDWOPzOhQ=NhwD#LF1%;}uJ58iT=Yp~H2+yUL@P~8(uy=$ zQiOh~!!OYk52^Xtzf{JfC51wFqZG2EBy@An_fDtSF`re??dFB1Wu>C{tA*?Jd@b||opwE&Eib>=hHr$dJ zjwiHW~1w2>Z77$G$H*f%Va4eJQR#*-0ppfH6dz*ZkCN%>O6TYS77jT}W*A>!r6|wUy2WQ@eEhf=~HB(cpaNcbNX)@*p z2xh3kAK9wNbzRwiKht>w7JpXxwB8{orzcMlY zM2m@SM|uHH-BF@X!oBM`gd(r?i9gBl1LNc4r+Ku=^k0TB#-j-cbNzEE;s^E|Pevc( zS$8C1Jccum=eAyDX&?iJdO}lUK@OzIIIE?O{xU#|b%i+yC7d8!-K^sZd321gmI7B; z@+?A;K+pDthE*u}{NN0I@QlE6BeWc7_Y7y7*EIG(YSen9i(b@X5(mV&Ne_@M|AEs- zWWBoUZwQ5}`id_lG=xxc{H!zecS*dz-fRXxT}pf4%5l?1J`iB>jYa%-QHx2;)Zr8l zSj%uq3yoO-p4P8^&xTVHc?2tF&$i^F$+MZt|C_%e6h0@#ex)bAjqL+?j_(ujZOvcL zh;KdXzn~9%KJZ;a{l#ajUuf9ZgTG=>h_u4Ia8{qN-(g&}ktNQ_TE5)E^l+Zdr{l&h zTR1`m=gNvj1B$elkxn92+{RV-VQxH(>ekN{Vy#Jwl*?cf+BZ5sc-B*+42p2S;_x zbW-)z^g~}~niiof>&GuC*Y<~=q4JF66w6yo;+O=^=!g$m3%FVUJ}qj5=$)@*HaYfP 
z-pM+KTlbi5jXv*}vD}9ughZah(!+b=gDS#uqc%H5xh~hh*Q!4w6uve3OXy4J3;(}O#Q#5^3I8FUuCD&1R8x9fZ!wA4eUSETiA^^>ojRWG zf7M=2AMO1dPH69%UfNrj*dEivN0jbRF4MMCN$7u62(3Me|wm~ZoMS( zy!y^l2!$7ul>V$j!+~G49&jGQBsh5uL+7pDYjx;cuSe(6I3BmGFMQsjvO#HUNqr(- z$LcK7Nskn-a}(?D6|rZlghZZJH$I6_I4y;}UJ889?_-j!YQiMn^5mKDS!j5-2Yj&> zuW}%kvtIpt5W@Dh<;LP*NXmd($6Z=-7!@jB_gWD84jNEhXEdgJ$r{lH)AVW1z>HQ(D} zgF5Gz4eHcaghXCfD;`G(0`c}bo_N0f-3HWKzI|G6Idn!@CRVbXn>%sb&F|LtP%JSB zv&VN7Ml!jSQd&qq2m}RxKSOL~hh+WU8mxuSgkYLNHaDJ3NNw-bBAF=MjuFc|W|n52 z$%Kz#WH(Qv=MD78e$Ha|!wD(NlJo6$qBnyLw0rKux-cpgB9?~*_It~V_s4REDsbds zWA0oyW}F&Z9}3j3wE#z)4iaOWL)1~M%qoh!oGLpzjIu;ETnz?*FSRBg4HZt&d(3U0 z93|>)jwn&f*$4C2BzqdH8V6!S`J)Jh2lQs2srbdZZo=dJ49aIN-DwO3I*r_=8_uo# zVqqFP?0c|rC<3P>M;dcV3T!zG=-tLy%#*;huzk1J23E;(6Vv~HEcov|3nBTkf9=gi zZh2zSWT6QX{7m)-1^@X&Ap_+Rx9o>)yvx@~gn(%%a~rk7lP)3zzR06bCM_tVIMoI> z)4(+O86(FTWwZEqqm~?2kEn0I!w}UitrpiAnG0(hIM>P;Ofvr9fXkC&Q5r&Jf&GI2 zraXj-iCdmi7%`X0bH&24z*n9DW%6C3#pUZJo(PuK!oucgTqc&frDw%T?)wW#9Hg~k zxe+1BBVxDwGBjEi_|9u9lcDi-a5Bw00cmsK0i9r1TM~i_e$x<+Oi;!TW29rX7?J~u zKeLGwq{1_xK4wIyxV^Zect4hTAQE{q^vumCc5-pMTV4yD505e+R3^_TEDLl9es?y# znPs8fr`;^G|Ht!%%Xf-+KE;6`9U%Qi2zadtkn@~F=O=JHi#}xo$Msz-U{c9Ygvz7~ zc1-pk7XqE^pkOlH^OyR%twP{46AOm&;J-}>xs|(#%l@2h78>0491L0xRxzjB@DeeL9H1qTg z1CT%H84B)PMuc4U=Ea#V<(eZ9Gsnn)k2$YktRYw`Jkl%#&JhrD3QZod6lQ#oz%YeT z;FID;Bcz=5E8TKbT)oeE8YG_!6BIm%Uw3KBtIG zYlLJmvY+uKZEMCNJi6o=kmR-#veM47QhtAg^3NqDz*zUm$&B^sQf5NWLSL9f^`8@< zWaD3xRofn3lEo)7iRvH+l&oEu>`ph8mpr1C3{Qv>)qh2lsPjwME7o^MVfbyt=)Y1+ zsV%MMVK>UHbR^LGw|@mK{lS;)r*^=>s;ksE%|onD->gXPQYm**%!zHzck8~5_<{_}$Jh+#e$8~C~5+}K(e9_tH;RzAIa)7p8 zl7_fZ$$thTEqXk|WTB}_oJ2Y1QqD2=@L&c9crXLt`JDMtE>Oq00PO<9e7DSqBRuUC zAua%vKx@D0=kTOcw3q%^I7o<(Ac>Gkt&v+GS=2@C+A<*dL9>H3JGQ0R*Chyn25|7R z;&&E#@AlmByW1|p_T56@!~O`#<(B%Bx6fa2ixAk-A0c5wdFH(OlXu@C1lG~prt*Px zC+7%(zx785l2SKTh!g4RP)-ND=@1fpNBgmLVjUkf?IPvs4~F`&;nQK3$=PoECyNEA z&{QE#m`n-z3Hlx6vxSpa-FTSE{dYS+pH(P1MJGGci$!awpo3d0mcU4#ga*I{NM6q? 
z@#sQ8PX3VavtEi+=W^qmrP$Xg07h_h;uii++6p|A*fXh0bP9okfK=-_aIbc4)Bx$W zw|Xy-S6J!^1)_JPuwg~!yn1r?9p1r0(~5zPIx@$bxe>D5D3{eOXFgI3D7U2>UbSQ# zL;O5WXE+`L;x|_T9sTNF9UXz#?z}2W)Rp|h1ALiZ0Bd*TD!M(hwP~zM-E(OYb#Aya zO4Nz`W8iRL;mj(aa8QkqS}qOSch%mFeO(!~lVkZk1MQwatAR^dCc%4j{Q^M0JU&X) zZ+L^OIv%s+c0!0+#=d*|zGRdG^772J(UL-$p znC0z#@J4v^d$!Tgq}zQ$;6nbzPCiwP3$JF3Oj^xD2(IN)9owm>BQAjs-*`_l7ro}n zq#6C%4kgQal+3>}iQm4%N}koD#inC@3vaLc;gxb_U;bRF#dQp z27fF#IDGF(v&g12=|^8D@z~nmsA0gXBd_`YM7te{hMVzn0;pLr#OfLQ^@)m?u z2|R$jF%MtVNeK3JWqbM;cZ5gM8#-4? z?&gOxP}~bh&3O)KsT_2@(RC!&K={)Wgv4))J0ibNxbw$93SS3#7Wu9(&@Mc91qce` z#4`KOi!D?{7zt3lORVf6^2Ap6+gswRyZ07a9o-<6DU*rDQhM^HU&50phjX}@4K~}6 zg;yT^FS!D@TUB^`F)&^2|9+e^XiF~baq<307AD&4^@@6Iw+dG%nx@GhB?Kb2# zG;1M`Z%GFt{m=q!_3S230)FPcVkzX3cH)?vT_H{gfBgfUjkd_&T;!kOPar57qlQu- z+55M36T)f<{~bcTYbudUU?4b+GY~8nVQDM*n5zXnx9QXqaR@VDK+C-hhVRT12bF3t z;r>4mT?`X{ZyT0y%l9nw=|;Z&4*&jp_Ms+7fb6IE`M6A?Tbe6cD7{#njaB;PXc3R^ z5mz~?n(V=G`F}XdhmDClnZ}-@E#XKNH1de9zvfNCD15s!fs*xCPeC%G0Tsk54*#3g+cBZFZ-5JGD%_3sz_4;vWTv)$&3BB5!^P>A?R1pl1| z)+)@gJO%O|2!S#KLd6|)JcS&{MZ8xIz^b4($KT-{2#T+p`f+DLx{f3;IQ8jwG{&X{ zA=U!+RCBh$z{3UoA4bD^yIvnk<{bKBa4}m|t98qCTWp!`WYW{SdQ}4?Q2M+mNz7bv zag?Y-&yNz-RH#=q@O0Ih#ydu4ZnkK%Xh9WXp88Z?0u*Qsg`Ck8aeh=9hctsKgu<8h zL4sKAS>%W0QpFt*Olxv63BTJ-h_BP^&0u!U=Y28TxYCwpq*aXc;Slci- zim(jjK}fK73W1ZLQkoJokO%|?KlqZ#juOmO&n|gMOhGQc$KvKUIrvFQHI{hzYiT{B z07(?xVEHev=DP|AlD(R*LvAro72ajBNeY;mvf{KrqLrPTI3F{n;s^f~X(iKT`1S}P zZs~|iiel-6TRIMBP6-b#N`5dr`9UTjv2~Ag_p=Z8cr-xw2;li5=P2tq=LC}3ECTND zUz4nm`|`ph+`s>VC{c%xjuO>!ZZht(ZyD}utXMm41Lpf&F48MIg#hHpZe@I*on9sK zexb=NmipU;z*IWeKrr$F4zxRjwXNYleM50>n!_zmDhXfw4IyPxh|YDKZ^^vakz0at_ox zWudNK3w<#pxd%vuVubiw`S@$6nZu_#A^LU}* z$rS!TPO)6ZnRw1F%3iAJdixzhh>D!3&NH|;$l`Zsegm4PSOtr3f;HzZi@I>VwphX< z>Fcz4ev5sbLp+(qL8haXwnyFo*-yzj@y84lKb}=Ddn^_<*0T$97ZT6e+-@GcI_-PC zvnjgC*3cJec+U37I(}^JD#WrO@-NIX@q=tdo5-D}#P(0LiH=8OO?aQeao4k%1eTP+ zAOPr72CC1z8Pn+U*iSr)e*XeMn}0l{n}u9D3m1)H>Y13|Q30!IFvYlA&WRG$%|5-j zPWLH92l5Su33T5>H@B(cF)4o064lOs#{jE3yc9Uy%WM;)XxC+6QmQ*-yz*%jJN189 
z6G0R$l3(gMG(W*tMJ%TmO>RF_2$#v6sb-uMkxG7cMp3+p`n1`k(Oj?l#+&DZp)num^kyU+j# zs7z&w7>M!}SHtXoL)VK+ti@(2!E<^`?8LT5_Lel$K3PE9$I2kJ!eIUN`^;?`t4a!622|Ue5M`hO{_C(Z%J0o-Y;sr zO7j4xZRFE7UN|dCA}?cUSNLR<5Ot#mp>T6|3j4pZ|53ir_@4MdnD>}P;B_8^B2V>V z|Mb6X55|0UVtb$0_R*f=7Ym5SZ!c$b!s3;`ELIvAzpaGuSmu5kZE#kf#s&8}&1f&s z?inkuDS9vUT)O|+I8UJL!bfN!3_o@S{*liFi#_1wK6HC&G2r<)-9LGWM))*VP-p74{RgW_aC7S-G4BQ5@VJnSU1&sQH`fKXUW9e;Fn zkMYHt3IrG5iM1$;g}BuuY9_0RwyRk4zRioR1u&$dSP*tCyPEpZ9u1Fm!5w)oum2gA z!o%|sg52Y&<>Nbyeb{s3B6$s*Gj^;fW?ssH4$o*TKhA9Cc6)^ur?|kNv&fbu7S?)~ z=W97jdLtWj58RTDGK?d@KZ3qagYXczQpb`-Z-h5)@eaBNwZ~{CCYzdGOcJ&Q@70j= zG;Un5&wBynJk`#`r!%7h?_i;6`oOxAbG%tXQ-xUL?-2a$eC~_bQWjghmE1`;F(Eo` zx~ir(FzfgNIszFX?rHDqv@D-q-0pApIO*>dS1|4@TkxiMpLTgi2u%*l_|ip##?KP| z7;NP6Cm6hngOTrxgOP8GgONj^|A~AaXYoqb*>ki#1H}#4S$4||OyFi)6uG}R7w%yeXbEcB?mEF11_*Ib(B#4W*g$uDWfm*CI5P4jg~A zCxfw_xlxF^wm|39mEu;{pHSE+wL#z!b@uNN3g4TMBq#|V?=`0MXJQM5v|U>&(gh!^ zQ1EMl5g};@MBG;{`(R5B!t%0#7A$8D^qn#Z0V6C@>(;@^a2+Uz0>=gae+&ruPMM?% z(MA`I0ymIYpb4nTr*@499PR?uAc4cz6uj1W?Y^Cb_p-}A5&B+Ro!L+XF zIZ*-`_SM#GDEGT6T{*D$`hHlHsAFikvuL@CVGqU77UQw?%#>m)-sqTe-GnEI+l8gw z_{6t3bn+Y=IyC{0*@vYzDz;Tc3Gtmu6a1x7LWJxysn<`Xy&Q_DTI#L?rclAxhMHa-u|?YmMq`z|T8P_#Ma;6?D9_im-jJCp%H*GkUJ- zV-_-c@gs>GV`xF{iv2Og)Gc2f;P z;pbE53ltiD0D<=~h-c*#zsi~TBI7~`mPt_sGOXCQ3%>PCrpD54!T%II+#xi8J*o5= zuHTA4s(%}G5`yg~1s_AIKd@Z6k0T4S6l%C0aE7k#_D=*KC?%v%xYXgH{Ha3BbJGo& z+ukfRSPjhPpin)Q1(099Fj?0(bVyV)4l#-9JGoG7{0+%!%436d98g`$ip`y^E0#`+ z?HH`v)2hE?#r|}C^3L(u!P+*F7Q2QOd+PdR{n3sxrBV31HAp2*m20$nW_EW=j+T8)&?UT>~=?sQ!LU4wSVF^gu=fjspWe!!`}t8 zzcsYKHCQ(P{fithr(KD)(50Fv3-UbuS6Lj4jA4SiWD_;=S{ZjsQkyxE6|&H(1`hSzlFY(?Z3_BHbZeo#KJXgm?ep|?-L$+!hm?t zce)*WiObQI+;>Uv&oHo^vHIF_okF(|Yhm?^)XOuHqz@kc<>o_V_Lbx_{|1u5TH6^u`ZK%NZ1+3(&!~(Sgpl0o!dnpvXGZ&ouaKO-LpGaVu%SS3 z@kLlMpVN|ou$=YV&vRn&jc<)4}qvQ=oWCdM?7gF2kbnn6zVQyOx{BchGZ*R9$Ef0p$XfLhYW)MK{)xwLwpBE)OFdGTvOMTv!t-DE88=)t}Dlrfqg;74V$qq z=!pGd`oHG7u0dW}q$KvDTzfHi$|(L*&Uu1o|CPf8}L)*Icd7c{!Xi+P57te%WAb;8B|pb((ql=^oQ 
zFqVJbo-9dylm+|11)ACF{7fjguS{1EEN7q3(#ukq729!ZGNW2aSFug3wZ9$9M&Sl7 zWtU^QQFB~%VEN&le4ksQX0p=XU6;IdyEIEL9RHiGrR&R+cY>K&dI|hh%28eYqC_2M zigMLeEYGhHSV#zojK-QuTMv1C;5Pn9F<1BYNmdmhuMOM1x<$rFfJ)bCE~R(L(5`ROKvB5(Adm$TB6_ZL)Vru4$hqQERC zch!+ZdeJofF`D@b%TF+x*;~7Z64YWi3BvLN>p0a|u##2x7z-`~6j3`&i+%_(@Vu9C zp$6~{VinHWhYdTl2om;YVHG6jhTN+$Q5$OkT(w)bSfJLN0)kxg#~48#|1?gJHPHM| zLv_uAJM*Gb+8&jn-XKbdBMdNCjdPQc<(dpYmKqIN4q2EIWoj}Z4$sg_5nEWXiL;Xl z@pyWasKuCJtm0d={4Yb^r(ShK@EoMxO>S{>Pq!8h^X2_VCaKg7sz z{@yqlRs$LSZg4UgMo#n~!^gT|{*F{9BhZ$9fI!t60?jr9W^BJEnGFBfPfvy=tk^p@ zC6nQNx)Qy_TKhxf7m?xV(@^?9IGGGb_0!|*`)t{K)|E_#r_FktecBWys-1KbRqXEW zg$&NsDb_<>GU${boF}=(VKE@iWBLk{s1Bp@?*^whEOfvkY;cQt>21P> zAXac1-ECQLpkqZ9R-8t2t5eL^4n&KvVzc;-vdJPj#e5$BIv-f>w?D>Mu1Qq$qX{NE zDY9u`sV(xX`mY%Xg-H+fOrkx+##sE3)_!0Q(Z=o1g53TrsKxTw3=TNDL-miButRkr zo8=m%*CYJOVjP(ds@ZIteLXlco72^7rWo@>W%OFWuSEuCcm}5g%E}|UhGy`Fu(F!H zmR7@OSjjr@utgWVPx13UQa)_{?v%!dvs@qLumvUv`4pyGusqX><#a0eHMG)aF(ElAxyQrp8GB`>+-lyr(3Kkvu_uaMo>Ukh}Bc%{DFpT+VzkQ9O{=sY$d{0$O!7NV3~bXd^~(LpuhqHwtpx*^eC|4wcLfl z!;VzTu`+(EQ#SAWqYX_C1iin-QvOzXk`*|EVxIGmwhXT3rUnpC8>HZ6OmwKk(iAQ0 zz!a-&e)x|z6bO33AcQs5KitOcE81ZHR*O)$iKTK-7u6yZc?dHLhT#eE0%v1}6F6|@!HT(MHS4@nsuHd0#(^AEZaGhzI>@VtiE5y+kjhcIKqP;u=#zk6Y;*Cdxq-d0222t{D9GUOhnS-#L`#ZP{znfl^MFwpDOnB%wK(wsU zJRiSH9-jY*4F&cKfsxFja2K5}=GIWpax~X>#@F99M>rzvgcRrDD-BHdRgO7QYc4aD zn4;?ATu)n$<;y#uEo>1B?U{3`p!as$H` zSeNA0%rZ_|%?+b-M>kqxPSe$V6K}!FdRAWYh*tI4Z%@UHY63+A6RRyqsHT`A`m3r49CQZc%pOg1IaTm6lT4%iE3l4sOGLBru~8{ zIC@ys!kV}?zm*}y~=*c``O41#}(5ux`mZQs!1?+wvLL%9$;Y6Z4`> z#uh54%BEIWC$vZ}3}dM9W9eoZ*gSvG`(SU=6w}H7+JMm8YzQI0LD*1vN&S~(B?XA1 zNZ4@1u!l;o_7-8)%M|cqj$q|!gpLp@CUHqAbSMpv{0tk83QeAcl6Y?JXTHj4bZ}pD134uG)bo>H>g=UKC zImdnx!OF9Wz|Q5zqyIait(}{SXl3lswdBB*dY(0+ScJ9W{gIKnMKR8y8j-G#_8o$9 z0}$l`2bPzXVEd1PuOyu^!Ev9v<%tfrJhjAaZx(#{>CBamdBtt!9%ofzb!_o8easfR6}1 zp=`4Lhz{Z`f>xUlxWT~YlIN}E(9NBrlTt3_MPg2JQ;zo5s5aHHM)QxRY8320dravw zx9S>&{3M_?k(ukC><*dO@ue0;B~w(NQguBE_5f+cb`!HsU(4ZX6V*FaU5|piBCJ`3 
z$Eobr&KQ*mfwn#<;(z$O2W9q!`!PyOAR~${z+G4Kep4vEbRy0jh871Kxe0u)l{!jd zi<;~z!P>cOA~1ffRT_D1e72`%e2x&9)-Qnxl<2xvg6(@feYNcg*Fd_8`Mu~hYR zaC(uoKzBrOHr?mD4tFzy4c~zgFz}jiH&K0GcN6t)_RR*`KUSYq`Z8WB3w&v$S_`iH*g%9TS~onm1(K?o9+g8q-^kCQxNA@&80iBFXk z1&%smt(}W~Cygt%3QZLQ49dNAjyf`7$$Hw5QJGm+cWT0t7kr0=(4kD<7ZXBX^h-ZR z8{A%hlJrMt#cJgNR7gmD7rhf2?j?kPl{r5-(Klvw)Der|yO9ureJ9g|2g?ZMD)B~z zRae53Wdn>(WlCLXUFQU0RUtfb4@hHu5mpT$g!m5i4;{*Z{$!?q?E7Xy=$n4&QQEJg z${K_SsXs}GfP_`wMhSsK5?|1i*q>furKSa8M(={l2Px-Mc8Z0*&Xr3>vpEg43k{<+ z=G|zZ$Wg>FoY#33h0YQ?EA8tt2n|OOLQ*Ky+0WM&q?gzIiM>+h*J^kMUi{}qrvd3zDnQvM|rBlvcYHS52Tp2tb7T)>39J z*;<+xvw|+IE)EuVxTKCqx{GYX(pI;0!1WHHe-jXL+xG~*DG)1a8x9>Xx#bJ`mj%8P z{NJ-Bt-l4Qm0DfC?tUxpaQV7TUSs5Km#^FGJ=ZB;=vZMxlLOy*)3H=qfTf+0r(IGv zmOfX1brY3DeuzZqhu%V`d|@sxnu(>-JS^=DFMtI7-B>!Irt{J&eO^hQ*LVLMCG))l zT+&wh%*mb|A!I&O*0wxeSv4&%idE*8TGh?7cxA5o?$h*xmz?s2kLo_Cp&uNJ@((^z zW$gnj&n$p&3I76`nwbYTtp;wP^cH(s15fYQqHDLn5l~L4Jj*1_tSpshS$2J?)S6ry z0l9m?JoJ$XF(YTqpLpLY))bBU@aG=@PenyRZt<3QN z9N*(G>of}YpG~{`w$@~sy-ip( zfDT$IkGiGl*tMx!8=y43+$rad<9)`;M2q3a$$<{$DaJ)i-J3$|#Gg9+(I^dOmP0bzyVCbhy;nzpSLG^BU`$_oi)4I>qwoSlKLhQ1MUEXAu@$B;NaR`NDMu(}mf zw*8c6s65>o=6E@G9rJ0K{jlK6HzG7i&VQj)`qa0>Fj@L{sc*Z%Y2WP;N`*~dq1w$@Y7SpQ2yset z#qm;wSR&a(x1_}*r>*5VrMtv@H^Udc!BN5T1Lm~>}$N|#5+5)2+Io#S0HF-|?TB}l6|mlaKLd6eBPV0UAR5DNQG_c*U}ibojzt%nK^Kg{W`k<%Yj zq42XUHY@fa*mEMV?Ppe{%j_d7=f&Q?2r>Jb6*Rd~bnMo+x5@Mqxbs7j2Rv zs^k%E(lSb%PBCAdHxHySwwo)&61czfv|ZaT>^*<)MTxdyTKln<)TiblUCvo`1+8my zuk!KYYm?V5UY|TG56d$O!0#;~dLSH{{(TN)q=wK61+4F!`wyh;J=tq}LIcCC#5tPC z?N4AJZZS`7+YP(qX{EYdGQ>Rm;nQxNK4Hy%DE8d1bj83ky^R-R_G5SM?nCgd(9cet&L{@2_Crr=|LSY?6PCJkbK_-&w-^f(sH{Y(~40%7zBPEJF$G(`SAN_?KUi5cC}>?!tzxYTLvMQ)P~3G#i4K(eR&a^<@c9pg z2!*3(@}KRU|Kw@>$ExCwfKLRj)ipH??7#Qu8Oz8$Q0%ANWP4}4N6)6drK~5gtDC3` zSiQ6LUgr7qgBRY1A?zG|CjQftzH@q1Hj7m@s<0=03k^@C#s?-qnnd-fd$hF9;E!Sk z3;n|5+{bR^dHS~@35Ijd*g=qQ)9e~M2;s5j9D0$=SJI1Y&W8(NvjIz4S@c#hfBZAA z@t>m;2=>GC5DL3`&EE{;PvR#m!m3GB@Bdx5o}2j2OTpi~sn>5te~SuW=?1$_mnDA0 
z#dx_#%wulgPu$7WyM_LsCnLUYW5s+*A6j-Sor@2fVJ=Rbi%@t`ihSb3^tvh4I;>l_ zE4`HtU}s|atCIam_}VEnWxaXFAk>&Cs{iO8)r@@L49K8!Dw%&SLXlT`r;o+sli{o3 z`Xe&Od_ThG1v&hT;G;_tK)#Mv|LO;Q)Zf#8v*G~;Gxg1^XoCOexxd8!6XuV7KWTfy zV&WH5`hPxc=pFy%tSzrnw9MN1S`L!4&c4fr0`0;>r7WV4l3Ra=4Xw(Mv+lXWhJ0NH zA&_rC2+Y?Tv(kTSLv+XAjbe|E-D#r)5P6lO`d>ddO=R<@zqKJ>r(tD=4(n@XqF5Lk zN+;^*4^I=-nAc4P{PqeCq2o3TuOr_k|{N z4t?+ddjL8>p(!hRhYf{KMri4b+N%o+j#bx!oFW!fCkrVB2Y;jv36Kh999N zx1WPhcycsVyokjk6X&kQBpR51Y8;qjMam=Lcl<)QyLjT z!cHQSaUY17QgunwfX(9Q6WzK*$Jp0p@DyNQm(e>e z&YYks$P#eR$h6orJZb<8NyQS#0IO)Nis|Z z9q%R)h{KZ?-+jIv$091c{qQr#BHUWH4k0vzb|uR-&xWwF@$U$sK)ZK1Rf@14V|#F`y8Gy9(&7|L^y}g5v(K;|Uee22q2}5sB;zbw*XImc>BiLc zU7te!2gQ|=)qI%8y=dhT+_93?zt+Yrg}$EL-Rmyk@!<$X9!=Gr(9ndE<8>t9r_eOj z(&!dPX!t2P#lqr_NG0EhR8gEZ*T0a1G(pIs4wwm2pY1Sz7+0()SvM}y`g>WKhG|MU z*pe0sO|%`~NyqZD{Oz#E0G71hn~CM&XVWGE`@O4hJZ{IXu_ftaC ztURG<(a@fIs>UkO>S*+c*=nt$cZg2OM=0`0DtKB9$?-3>IV_=^nQ`z!c}AX80TJBt zIjCpx~gGWr4WZ&0xY}5)|j3 z)+Or4iW%L;!=P<=>@T9`lk0@^)EB>Vs+wpIJgQi zPyOxlM6Z9jYgk|Um;XWi0}|`c?6dyM66M#VwSq-5pa4FvEqzxwKuspM9ydo2YMX?I!9o!EXJ~vxg!S zt|EQ#f1zQMp=Z3Wi9DDtryd7ZazB1LhglG1<(-DadF6fsU4$IichW5QerM3k<~)0A z+_H0TwOeuTt;D;&ciE2!$~bs^+0mz{2;=`4mN8{By9R=S-)2C_DVwLywjsA%Ug%aT z3nvGHLf~_j(wY@)brd3SG^4f46?-e)a@F3NGNsy3X8&~M`DF?=xFz@AYI%72Y#W-U zjNcmA?@ga3nYO|fUQHX1P`DI)*Wx44GuQc z+A9%?>~={(x3p88cO@O^=fy#_3VsE{HUeAy-#o>|vT=Eg_<3})%bo&p=%OJ!3uSNf zdwz=Z(1}(&XDZ}p*n;F9~{5bcXg&Av}7grh5F-AI1Sf#)g$`$ zBbdM3bLz8^(3F{teO;6ICra*j_h%zvL*`UwTWemimIn}Mp`1R~nP~;n2A6j{u5F85k$Ms!Z2%*}kSjiP1g4Q!=>$P^QipaVLH4iFN)g+B6yiozC(7Ki@?fc%}47cn&TH}`8M=9POt#s{D%j_ZHK?=+=rIeJ}+XP>h z0io;Us_mWAlx5pHo$|TPDjS*{_{w|Tv{vW#PKNpJqcvbCu$Q^c9j(TJ{hssuNWN@v z6%`qm76y)c)7?^KVb}|ZzDLcnBNVQIoBbd#x};EXP(5BkYiNu7gS(qnvGf2|te7KD zaEv?0#`UUxBxk*K6KjCEcq_xtrKRNcyTKJy@$crs>R2o-DC8WBJ;aLnajnJ$xuZO>o^buGfd&tJup^bVQLXkgzkocF_-miL*Zxhxdu&ou}V!W73wt+ziMc(X*Pis*! 
zeQH6|VcmaQpqsV~OxkhiTN9Rc;z0Ydv2^R^J0M_RXGX1neO=aCmoHe<=*-NE{2OjB z^#yN)+m`r|pB6L&mP?49ad1<&&Y(g4;xT~2es=K;LI{#=_6d*T9{SG@^jTp+h&kCm zK_74yg~Z#v)K{2nUTGDYkU!{6kE`JZltoG8i2ARbRQjk(d)t5TxnXPw|0iK?!r2H# zHl*s0(D26|>$$d^%TqJiYW~tjSf*VQk`=$ZLi*LbmM!`Zn3E%lycE)}=I&rOQNP*L zP1Fun?}Pf_D3G8a{WX}OOR^CPm-Q)+3k?T);1}iplx0b7u|S@Y2||-x_b}3Y2KH}& zW;?{>!#-n3+d|NOKDz~qk?!<(F^JH*Kd2w|a#rld0lL~C)yevxe$+RxV%_>iK_j40 zaI~>5-II|l2$5F+5`|WT!f6Z=iTKb{Kd;=Ld|t7MRpPUz&`AI8&`mC1$axV_#4uL7gKg7^8KIKHVX0q|m z;*Qn)bk7QzRXt*%29Y9l>RZtKwn@o!u<$)SLHq8CyPDSh1f@Cc~}!-EO{Dq{aTsidFTf?PJ<P~fm1pB^5XEzSP$LsPfUhi@w^gEdOM3ZGYU{P1Id)!rLXe$?&th1n?`) zJQMs<(El-gbfpV{{$FJDpZSZ3_^;Vc4Pe_wFY8ef8{&GwA;&_FRiK zD-~Mz%5cJ28?z}lVri;HH-wYv2u1#$y8hDC`UcG$KxcUqHz>K{il`n!f1=gD*h~K> z^)if|5JVtM89W6lrB7aclTe!h- zZ}Z)gej4<7!7c&$@;RWNW=ksiS;OE_L+hxfbyW8}W4_mc<;&gj#6m33D8hk`#g$lE zCRVy7oxWPHWzyn+fFFO)q&_DXFndh!%Btjw%xdi0R@1DmP@u0%`XdAeyB~ZI?@oCU zma9Yyw(s*4VA)ZK?Rz}g*uK}>k4mj0o9TZ9|Hs4stD95I|Jc;}P#Nd>4Spogu(D`3 z<8H#z2^>0<<`kNo#iq$psML2rD6Q)>dW06IxKx)7rZgBfmDY6`JpD@Rx{Tg~!Uj)q zzj=Y5HpH-HspYHiWc=hZLu}kJcgCKB3|y|Sh=@!<+`TY zkh=Z~er8SDuw-tSW<%=8m8Xb$S5`MsKe?@&sJE`^ChCP$FlpQpB`xM|mkk+p+o1a? 
zD7upkb{_tJ{140HMXY#4OPC5KAAA754L@Sv;nCRl)e)vN0gj+tHT{7ci1Ac|2*J{9 zE0$*GVQB=GZnpqB2!EAYe4R+}_ctI^e`?BYcYD2O*Pp!O?%Q16!S!8tdv3eSJFvw( z58IHT#XN^zzyv_}t7Lv1+t7)v2qBa3h`EB^gpd&-;Suvxc7rVR=1P8Jr8gJzn>uj@(23>(cGCto z$N)F*vK!W=*QeM}-BtgXVnYV6mDOR`={wNR*V)fAu*IxQu_4rAUNr@Bf13Zgl4X}T z4@+HGx$LH42$7vw$udeg2+Ia+UI1~(O08Hb&5K-&m8%v!n1j?mj7MlLZe{xVQE(h% zzVX2vgs-AEp8k^QT*OZ8-fql6bFsw9S~0(<B^zI0M`3vqeVm8o-xlH4H0ESBi94H3%Hvm-97DdzLcWaGFE2|tx5yW2y21t(%YobFnmzznxc74rx#(kWVVHVtxhQ z_cgwx_A869e0@2#?-cwHJ5wHn?K?fo+_H~lFA92Sv;QA^?;ajSku?tYOlHUsh&|yV z0lAFYtBET(QIw3pI_RJk95iZ_fKj8%X3=F$7G?l7klP1C}eQ)o z&cK*3`Qn$zM`DE=iKRMNHNwovM}de9$-}pP-HVYp6p8;cE;t$f0w6IYB^j`K9!tAcFvoi*-AV@Q!i3_tq^@>-z zk+?`bkMPQTB;Gv_i8mD@>5REZRGa;EzOwk|D6xTmk!GjEgpR0hli%T+G@B?q2-J@D z4;DuQ#~h0Z8L^4u@BtJ!=7ygYX85`MST)CS@Z<~i3F)Trq%uu?VrEZ9S=1*M_M~2W zVq;HkvZ_z)?8%SX69;?p+5q*5n?0#WSD#d~ClhSy6V61R9Ev?L(65XiX-_y4Jo&foi5Z?e!pELi;7PGP z_QVEHPS1)xvBQ&(bWbR=9vl*TLitrV6rRY|l?|kq(f(vK-XYGnQn0aeQN2R8GUpe` z&2(Zf&yKhKVauhzfNpDpj9uS#JOh>z9>TInRnj+7iQq-zx5%^~nZ7`|dzA&)F!TF$ zFUFBSBDqp*JS@{0`5wKeJR%nu(MAEnI=-($TCT? zyB=hR34oyoM=_AIO)AK0N;O?6X}tgk^6Eie-%CM;+Lv#!LCS@dJs5}YU?4+98puX? 
zqk4E*E?ti0(uKPLUNeQntcTaE!E0vl#!3-*&2jJ^I?mvwi4bj&2=UWyJ-p`B@E+I0 zYhmzO7`&Evcr6qXiymH!2Cs#|8!JWNwZy?YMTOTw;k6KW&o#hnNeyq89$p)R*T&$r z#lvf(kl6I_+BA4=4Bl8N0|cNJIZMUI!SHb~d>rxkI4GnJJw6T%9|yxHR*K-`h{NaSV+G316(2Xn$4&6b zGT`GrHGJ+@@u_C`R5N_4VB9v?G>)U3zHtl?v3_-Lg7AF~mkx}Q~i%oHCp*Qh-CqX8fD zso`T$@v$&`EDRq@JU$i*sYQ>EMZ?F!@X<;EJ{BWB-~OcHW1;w1xJG4XhXEhUso`T& z@v$*{Yz!Y;JU%uGsZEcMO~c2=@X<;EJ~kshr)&7wC_Xl>Q85|uv7H(|b`>8x!^h6> zvB%?Mr;yt9_}Ddk>ZN%r3BPu@C6rXCYQE51Az^D4u@Zn4hAI?Pa;Y<`C&J>3aX97q`ISn7q zq~b%0Z_<|nd`Mv(J_9s-I1}K*nHrTNKN#@gOs9sAS;fc9@G&!d%<=e`DWqmSK4uLc zGs8zMMe#8k@VTU2!^ceVF*hn_#^GZ=HGC{8J{E?Lh2dj~$Hzh;wdnD&X!uwdK58k7 zkHvsb*!^fuLV`KQ(7(TXmd~6g_n;svVhL4Tmqn4uh*bMmm zpGMA$9BVaclUv89r($ijUhk|B2z_rueuUm8>{?+{y5f zJc6xbZ9p;kkAass(f;rxKJtSWRpBoYY?}AI@qMkv*m<~Y$-q@=TWtOy z4NZ2Cn@WS~sh^!cj5GPI{L4xtjeohF&qECd9mv!MD@Bc3v_>|Piv&mbe51Qwr5}Rr zzjBd~9nLV6=-`)m1bZ33rNPp{SA@7SzPzQ3KWw!)hjC?m{=Y*gaLm3y@KvF}F~=fu zb@MCU0e>U7`S}&snm8`J(+oRMeb=Bs$X@nBJMTNcgD*$z$kd8t>xeOfxH5j~m4$#8iL*aaGa7&~OASmNMF_RLuNT`fIoO$mB6tdP1zTDoymzUVu z%lN}>W&Gj2@#Q;L9ta$pdBl;cyv$_-Gk_xQCSdnq2-vEg-gt>m(cT=tR6*W~gaa`~BDaM^U= z;&cFFIzTZUz?cqjOb0-w10>Ud9RK^}4{srt56I;ca@kKVo5^K6xipk{h|KlHh9A$T zY&cHuV=Ut-8wS(+_+j*Z@o0LVDva+p8$7mZa`W2Ol3S5&1G$yhUiqKk1w`_P)BjtN zbr9hI*#OZXl2PV?Dm+ky2deO}-csr&-KDUDFRv)=;LDd(z?-k(`X9Ld3|EY-uyTQ0 z`QTj`p9~}ax6AiWP~!Eis#GsMi!&3pTINavIBa#KWc9FA#FRyZz@xF#K-nYAq_kB~w`#Q_`eVfbpeczVx z`?i$vpSGbulN$w^-6#-oFYm^f<9zFzIIfYjvta7hX!(Gx(Q-HYEARcA?!rxT`Tdx{N%k z9M^?*A4a*IX!m!d0*&6p-l50*6$uVvGUbHNqVYe5_YT;JPu}ON@j?qJ|F9<01$VVv92TVA6j- zLms@9QPp>ij;=E#-vB~awEJ5fX=g~4=|I}u-ywbn;y*-roxTx>|JR44Xy98?wO_3o z<%LjQD{}4Tp9t~m8j$Rn2G5fdgbxnKm}tZ+iOgPWkAHhOO5YMWzSa@{wwb*p0)DMC z{_PV{j8&m52q^A?K$n|eaZ42Aa5=_!Yqb3It$Lr*4l}E_-f@ZMkEXFd#-yJIB zOWFU55b5Xk$|59oqCk&>Un#>>$1}D)GF(Tyo&2U<{$GHMxSAJyi^OJP(CJ^=3Ys|r zbtCBwrcVpR{6-Yq=L;kGN@`G!?BL5!(|zIF;reXhwgS-(*5lZA^_94H_rZqm?WDv; z^0s>FV`ma^50;T3hJf#tOU549a&YOAmaWn95nKP`+q>o9(h>Xo=MZW3y7pHpUU9$i z)u63ii&X%(_I|pX-?R(h>@E=PTe}v=bYOj4Jk46AySo>!jp@hbhiKN?`N9$;&!_oe 
z=Xu5XLd9IKbUsW4W=g+z&Xiti<2Y`n^v|_JIBurA$Uak~e@_1ALqo`{>B@GmbiqvV z?k?XbFU`0*U#NgH6G_#}w^Y;EqbclcAQ~{vvVl109v5-G;Pi?fA>XSeAw^!XL@4x% zbA@8BSSge$??YOIDS~bure*+f(CLmuDv?MfYMxf)l}IHLsYI$2O7)w-`K)w4>nb~+ zb(Mq9y2^>f=%Te4DoqAljDCN++G^|9<`7umAh?|MFMT_*1GE^FnZT!1J&L zqQ}gy_$&R(!ms!<{maJRzs|&Qd=2|;=U1#Tab$iNg28jeDigDtS4a>a)k9q@>*x=N zbzbqhiCEOFqrij-oaO0WhWO0xj4ziO)iDxQc5lr9emZwYFd2dT6W*S?ysk|7xdzB_<&qsY{;wfDR9t-aq}zg=W0{ynkbiO)if<8tYBZxH!V868RaYO5_)uDv^hX zh}Ezeo1KSawW&0As&!GV?}#qCl)Sc)*BNFatmDXQ zf0o!jucR$MgB0LmhylTKI?^s-$Uh5^SC3r5dN(037#xiNZs%y+;VtVbh z=0<6N108&MiT3_l z!q+0;N=aPNo5)PyR$}na9UJQ4%jb`6>)^|MP(h`!f>}f`ZX<#*7nnUCnC*jVD^pfm z=I-Fj?=B?s=F$$nd|3r}k;YOl(pc(68cV%MW2qNuEcGIdrCy}5)Qgk~l;08=EhjR% zn8;`qsUC#XL&!p^uPPcxs;?>;N2&+dJrsBkh2BHKXDZ~sz+wexEDvA)z*_dlUpBBm z9)E@X@ob3w@p2pcqwWCv8J;9o*dX6<$^$*ru)tjuj zs`sd#?4cpFb@cWgbQGD)9m2mqP9*#o$~}Tyd-?k}vYy1*IX=(`6Q{vr3&idYzWmPW z4!-;kYf*Cu)Ed!%fo4Lhdw`MCUMUcJX6EiDe-{8VuWR$>?jwK925#QozBO8Yj@R@B z%57whkZZ`4@h11yX!)(B$kep8w<>*WZ`HMqt-V#V+*^CAZYxEmW|Z5w_(NoBM5dlC z4a=&|+S;|agRlA>(MSu4Lb?waeFgCOYGCsXz~!5P$-i!IBq)H8Qv`Gn>tT%tv_R}q zaV!wKXXbA9=I-{IK0)%}aZJJU*(*XOxK*1uE*VP8n@Bk}L=d#TBWQhhTd6iSKt+rH z$-mr1aC1=H2&JnCpj7~96#!ZVfK~yZRRCz!?d^?7?CRjlClC>}Bh!8&rQj#O(&?3F z9Vifc$d7B>PL-N!h;_&x^M)@b>a2MAIj zg470rR5e=u14XLpssn^sA;PQ;gjv;S`C&a)4LWw^bnsObMBJAU@T%NSazzBLs=`8Y zEhaEkl~y#yi0h7GOo-bHZidu=d`AHJjsWr<0pz;`Kwf9${jXtx@hd#2)vyDQ3}^M< z5fH77@2mdNX4tjz9=X&{fp;IIMWRstz4frt=M!vFkzn^oMZ&Op6OkJ$nrdptc###3 zZqiohp#C&2Spj>aVLX+JglxJWE}^8O(Ex9(ICrArar?7i_l@Kc9B;WV=ef=!NVEv` zN*c#?9)wvup!_bmK*?!u(*@K6(_oid{b>|YEjh^h@4sLFHh>^9_pkQ5nUJWyB}Q2G zul`*9`(Guz*O?at!E@pZptu5OsLeB^k@$^n=M-`x>5nuMYMVbDNm)p`BOmtg2fl7c zlJ#!;Fb)N}INzD?(am%d1>8pSY(n0sLqHX=NU%2|d5VCVo9GrbYbKw?(UMT`&Rq&@ z;?tE^;r*}R`GRNgoco?BIXriUxNVP2jxEm`pjv#I0V-0Q0v7jGN_rD{cGRDym#RU= zeSmTF_reF6Did-Vt0H5Li8Tvh>{r1XZmSqm(+Nbfa4)TA2q|C3UlMk)G88VD~ z|AQ?`z8?YKQ}~-hJkw}#{YmBjXb({k9?2^R^#k>iFdTKZAsj-@9wCQ@CU^uVq!Z53 z!WB~TpRix@3KL1?`K0nGNV2Z+Xu^QR(IDyqxD(w59b@2kO#YffgZdEEn0%y=N9)L> 
zt;aQNqz`HijW%i|c+XxB^U^NC$d^;UP3;#>t-KEW&7fTAjeL~tmApg-=L$LX`4lsv z3apIGKSEH-1qFd)oG+KD*#gmx3gr4)j^mU&IF6esPIJ!nvaP@h_XWn&@JH{J$HuY9 z(Z1*z=odZp8}P^^NsOZAX0x5+P;=<wgBK*r?`&ZQ?ZVFN^{@5lvu};xf^p=Xl=aO^Jb%hbD};;{YF-b>aQ@n?9+trf z5`6|5Y_Ws=Ng)g5Bhw10y^w8>^bkPqSM{?E&R3O2x%T?c?d-GS^f;X6z6N*cS8J?o6~z@8AhB)zRpfJ z%@Sd`%=j+Q;7CG}w{J{Al0R(5I6S}a{J9W`l?2Av*^l{1T4+PkWaQf6yECt~rqy>F zl2A613hhW*n2n_S?8tS*cMiN7jU=}NNmo0NDk5>z(rpO#3U=k?{cwyj%@zsyWAY>Q zd99H*;>xW^EC=VGC~^bF;hdEDocbw$&@~nm_1%Af*fWb#5Zls_qW=t~2f)m*A!Dye z&W{Z#_>l-Rq`-Xl1_R6n;W1m|1j`Y7_pK#Pu$)F*q4L{rNP%m12;=Y-N%9r)D~_j< zCmTy|GYYx(^1&T2Vh|adV}l2E^g%X~itR{Rkd35!L9Fftv8q7Qj2tA*aUiK82T4mD zL>MQ*tMSz6p5sKWy?!Eo`AC|PpGy4lLXq$3l4c3pldk;obE9Y#3Hg!d@>(NL6PrON zf@z86AQf+3Yj_U5-9QBL_`4W~ze{#r>3=Ja2e0c}9)U;yx-JEeeD9`^M?pt-5Kekf zo~qsARk{)Wxc&KgApkrw9zjAkd6=7YO(&9v?T(%B)eW1>A^#RBghMFMHMQ~snwmQX zaO1x)j{Na`O)719yCP`_Z$^RcsY@?J^62ZOiF>6fdt6cfSS0!O+K^Pa$A(-{KTm!V zUA4!KTphmc^w<;@TNEds$?}UO=Ru2|akBG-_!X0qGv+}N?(`LaBD@SqSLY+?1}Bn=^I?AK z+M9$kns9>yVAFn`6s2SX@ab(I*8z$lhyL_1*tM1IYaUs#Np; zHBqo!Wk1q(^C*~&}#;L6bJ7U{Tlz4#5zbH!=76eEVo8R0PW5XW5b@T=9HF^ zU1JwudBul`^Sr<0wMHIP3SPw+43(3XN1*kEp1$=*@i;R6c~XskuOySlJCoXyHn!L@ zim3R@`MMc4zOLAobO;QD*x_~Z<#vr~0Leu=j6>iR1c2pn?ca7n{FO(4>^7QF$<*fQ zSENAugMVNgky6P!oUhw2-;~|~Wk=H0_I^5`#($8+0i{g(2gXUy*S#k`mp$ERJD+@w zP%z}XPTeZWGK3>}_>9*F(nB|a96VcC0UOSt+){5ioe9pZ61_q(ik(|G=h7a`4^p0f z8RPKX1Q5AWK*%-OzY2+GM}mFni~h)R}8;mPU*gl`n13H4ZwH3I|b?kUpiI$hT^|1xqb8ZjVbLLe#Kw=Js!A8 z8xLF-Hy*fS^P%gH8alFk9g4lX!Clp()LAwR8uKUMjG`tN8-EM@zNr;WCh0H9QQ>gmWi;*;=IMw1;UMTXtGVpnf!@i{U zQ$P6w2HMd}Q=t07bEhg_Q2*sylgrnP=l*Brj~8CrxBW-`AE}Vs^6dXR`wt+wXiJ>^ z_iPgT??nFo>LmXD&$<)+{pLh}|L(tmzu(y2AL84K3`8_H#WWMCX6ZE6^5AMp19|i| zSf>GFP#(OUg79WHb2Qh4{O>h+1h+0cf&4V-;JEs47>5@npTG3S{@vb-jYDI8GW$0_ zmHqonpZ4$U6Z^hN`wUmQY@I_F!Mw0+9a{<0xv*i``i5tc(0@*HexFaN-f&<5<+o`M z072VNSl*esg5$W#_v0CU@M(-AYvacMhv@jPJ6|j>6d6Q$jvJ8JLTg((HwFQm^a)x@ z9&J~d??&=OIMu|aynQ@Ikg=@Cz)@XACN<*AJ2D@+w)q}v@(2h-gB~GBe(H~Hc0P?U 
zII>gnryuh1;vwAU{0OG7KVO_eK3;mNPx-j_#QNa|{>>RS{>@@r8TFUW$l&XWGy1PA z0KNesgR+2Cn;}nO9R7XZ_EA6eC-|?9PVC!%1>@ld6H^eX=E?s*uXkRNME`9_x!!rj z|J-_KcGC5zl$Uu2vg#I1K;@U3x*kZQg2-1utt7^;b_ce2-x5%JXkvkzPtSB+C& zPI>Yh12oGU@<-@aRb#^Y@P#R4rtk@j!`u5fZ=7FIkzzeB(WV0WeX&WmzDf`E=wp4g zg%5Ud9LKLZO0LFr*4=#YWBOFT*1DUoS;KK$-maQme9dxL4_phxlOBef>ofmdNLrgi zALXz|HxK{_?MU0?PTKaH?@QDGDh-KHSm~d7HGtny)j)GzC_;M9dwHSAJ4jxuF<9kw zkzi9CFfp5O%RA#5aXS9l7&U5u$D(3g3;!UD!vsecUP@WN=^da&VSRqA*7C1rmw zT{PE9?8McPxjmR4gk;D;GTFZ!iDyTePDx+EeC2Xy-~N(pQfQ1bh3tK_{#5-X z3*PRB|2bP{4eQqn0XXN|F@K3?{r|MT=kI+qwLO30@j5mh z-7z8Mc%(eLKZVbu_mMuw>t82nU%#Gt89R;HM&qE|Hsso#Y!TlgXa`^0l{@RPei2`} zhrqBhY5V<@Zdak$ERTOj}MEDs6Nf@dpx7--?^#$|9?p)FL(7%Ua0@?^_aX&Q2Tqf+TXL0tHnPQ_RpmdmizyV zaisdB@CSXfAg6DAXXi|uGBYO?{(nw_zZSoCf;rl6r5QG<*yh^npN%ABhf}3p+x>_J zgw4rDuI>D~h7`-9G$uR{a>PL82&mV;7MruRMmCcQgYj+tL^<;>~p`G*mvGhX8Z}`us=CHPG?$TFc{S5pgu=YqQYycu7BzYR`$hFy5+vE`n z!9U_efi3xDidkri{Z#A^VjP})ivE#De~R;uWG8W(ej=ohC;5+k%2N`1IzIN`I=z3u zt;S0w^^Q>US-w0w4m#!KMDNItYx`qQpM<~d(L{e+Pf~x|g0)HfZ7(GAw}Fq<0X|l! 
zYw2PnHqh3APNQM7rW;99oXFMVpMj(jI}tgjjr^P}mDuf4rPJ=}@n1lvUb5FtxhQ*_ z@x*%PrP#D(j3a-B_Kx|5P10Z9X-gD|B=ohcKFR)) zc53}){tNx@FQw9n`pZes=g|)n>0?Qx&)2I?l0KGGqmTFb{?o_3>VJVg_w7iek2R4# zTf~#3kM-2(bN+Mvr;kHC5q(a^UYzrvLBP<5;i z|LGG7o`^pE^xwgLl&7*&%o%0}xD!3I=6+dn_F^-b`v z{AWZ8pr5HZ1%E2EfB92M{HZp?{42JU{?zkoQuYTX#3Oh=#^KT^#&P^{n)|zc3H%wmA+~-Qe8x)xT;+sjHh(Ef5nNyN{4RUEBHKPaMZdwFJW^T(B$Xb58+p75 zigrQKE>`pzC{s(1kbVSiuDyIvgcZ?Rn~B^WA-DX!zG;s(pB!?!)T%53BVbLU%h?`|z|V#!7xQ z#^Kj{al(Gw{_49dQt4Sk`}QY8N9rAtLZIKMN~Z5B`X|fK{`HT=^xZ^S65Ofab_JiR zf21_SA-NrWEkT@Lg>iUss{N7u9FL}f{!fK!>#|c_PlEdISf6}7>8@pc#E*|k5#JCD z@ihlH*29rxL%~*GJ`%T~z%lImp0eAujSoJ~(Xa+>Zw_qwQWn9aQ3K)J!@bx5u?_Y{ z5YREkSb46kTQd>{E4gav$Z`a;y^{FMq_z+5{{&@cVv&yjwVmD^DF)hXU89cL*T@X0@!RayGu22?0*f%&kFHb z9sv;=)tJDk8Eo}sGM@3l8Z*by0+LrKq+Sep%+zr<4!J(#gC*p3-E}r34XiB^CXjj| z7m4T*mLO@p0FG&Ky#UHXzW6uwglH25rZc_55(F;xnP)H|C+gCHB(A2_w+u3w)wKGq zM-n7c8;qpP`Ny(26s3#6rDD5sJwUwe^2xC=~_aQVzsB1W)0 z#xmAM$E0)^jAq!7G^EyRB;33h;Yx%GBwu{Nn<~|c2-S*&d2|E)h*61Dn*dZJ)i`Ut z!UafLC#V`b@jUxcszZ_OAeS>At4pV2o$M8g zDGOJP)B{7?rf?WwxZ*Se3^zbPPhM-d6NVmSCz!#I;0`xN*}JEDWA6}oR~Wv#7h|RQ z9*o20UgLaG_6QGA`R}!W{3}fU4>0+sck3fh)BdaEt)qb3khEH`!60svGT(V`D2L=F zwqPqCTu&;JRtt25M^KR1AvGo@2Bk>8&Q@ba(rgIx_`J2rJgpauCsx zo}C~MeDKd{w7a*f;`^#y6W2P5E(+1WJ&if;+v5+oo711mm#n6e(lJ&(+q(9ti z3x@b0b!*SCA=$d_Ap1PBmeK;;K2FJc-zFf(uSu1~a6C*RRcE~xqiVHA)h9>ls5)^| zjH*sT)iKOObt<_JV+@8TG@*=o7~{ypNNiE;UVj&*2s?U)`m~tG1VNqO{)}<>uWV(lkfKGNKW^f<@WNiEzV;oDsjmuB zXi50F;|br);VAR$gn3uPd!yXu?F)b2LmtnDWTmAmE12)#v4{-Y7YqbCaqC95r%S^FQ zt70832ZqJFSxL0Zbr!t>u?Wh%4C6pTr`_=bNxCK3KyrK-8%XvW29isd|1k!98^hDi z1`;|7UOf8cxPgR@f@B~uB>}$XEEDb3wBJ6?How?vLVR#FM->C2{H_tQSy>zT;b1@P zN;r*{bAUP^hEonTmWV-8TO>@1jd!C3W%6KJx{x4|!<-()sZsV@#(>hvGM5@ZG5WalSt454D(BfiW^~~sHp9&jYIVzIzDwg%m$cM zt5h?Mn#ijepKQ`_zK+>yyZGSC3{1+mLNZ3R?_*V5wMsWe{?pQI5xVZYhuG-%-a>-&31d`D+?dQIPy8PsQi}vy-F>v6hEjl z6SQ7-mW9fWTQwS*sBKQht-7q(xV1qWx45~f$E{MRb3!}D;VQ!LzD53LP8E3~=!BIy zwO%6VMFM2aE;NYzJhLk0WMm^9WK#IE<7^NdmpBMUdXfULngKxQ>}P{w4rx1=wkwqg 
z4$`G7;PZ8~bBXJrU!~|$dAq_9*j`v@*CwMP)uG|PcVnz9{XNFv{O;H=6gOV_P4Vj; zNQ=iCzUO-w`uLiQOpwT%4_ZwiyOaLP1aHSmZHfUt51Tan=GS8~y9vk)bDKgCZ%4$c zk)4GuGanUrK$Mr?iw(kabdX#_A@Q9@RPeu$G>y#5K|cWln><3PsmUYUu3J4}3cQfK zYC43mWUY8LrXLV$c4t#BmfJzy4r;~@NJ5?$Qm%V| z>E+12*vq_K;cGC)%13u$9L5RcFxnrlQ2S#f_5aY3aU+|8ZPdmP7)c41a$z<|?Oqd? z+8wl=j^`l@!9%mxCRls0 zWJRwG1*c#(nPZGJ>7Wz|l$F9fepU#{RR;){+XD6 z92(MtBc~DK%!3}eiLHpt1D7SF+oR@5AxwLmOlb3&4vsAaga@A^#!wA!oEF)BFj+7GBSyC8HtU_5e599FG-lXxXN%n^lGe0tj{bfhuP84kQ zUxEydSt;{_Z?c%?9`uf;qI|7Q9wA>h2|$wdnQy?4M;MaNuhY*6j{f$9`$PE^JI(R^ zovwK!@b9(ruV(S3N|@npbS|5?$-AtrUj1Ykfi% za9S)8vZslA17BGKJtn^9EfdF0z&}wnlU>{GuF!6!&$j5;%dke(P;+}R7|LVYL{JMteHLd=1X=M05j4`c< zulYL%$35^hpK%=L6+-|Sl4o1Mv`KUT}uE zdwSk3u?bEI@k(sRvooB9F|PFpL&YLtSm)+60{ToOTZca~P+Q%yjU^rNH)*tqp*11j zV96s4Yv@V~bea6w^*>=cV@2(JgDIw$>W^WJBi}Z3rv-QUHbxqifIW)o$t0u~=C+`z z?#p?OeU}tD~f*?cv$XyixK#?NyC^e@-z@_pnu4u41&8MPZ4JuNh*ZieL$Wk($?FXe(QGQ^_S&`7;PS#VD|)l$mxi%3-G2 zU_p7foB1nadHRbTNUpFW`5qH;?XQ}FMSt=a=c>Li8eUot;)C|wsBlAO_? zayZE628T3!)bm5R;4VLp#4GH?02TMZdC#z=LCGYogz1nj+N8|?Jg0i|{X;38L}bfr zMdG&bhf(H0BhYdx8F{tWqs!tzRr}~{v_BxY1VNphpt1xoEYXp*@&Jh zCXFdjVcf?Df1nm3wf4DPz0B4OhI~_~Vhx6T!%&j6Z)_*V4R#dV<@*AJRn^9}$PP+P z#&uZtR-W#{Dv=r2<=Geo_f_73T-z7=;S|iTEJ*T=vLR{MC@{rDAL%tkYFqPp)h0#d)E~=}22KSjU9kj&O)l*44txO&p!b)of#v zYCCm^%O!Rb68FGrb~<_(bsmdYQRkk#U8vz;I-^J>86_+urqys>*)t-C4CHPvY0A|} zM^K0*$U#y`h8;=1jBF$?!zhUP)nx=dSLed(kl2M9zOx`{;$%W=BK4DD(=M?xGqPjQ ze{zgD!!-wODf5n~hg0Q+ASqezS3uBDzJ}YHTQLs*J(_xd#LY&3sB+DRy}ChNy3R(% zKUdqr(^12LbO6dC zy2Ftjy%@>XT~80Clj|>kBC{UrX85fi46MK1qN?!Ezac7|4?bt&IHh_=44aA=HvIj+ zXHJMhIKCEQ6j(k+<$bmtl&M`g^HH3xMOxXFnG>S1=^#vcR@}fU1OFgGYP3lO5J&z; zx2kK&qm|Q`wgkh()xrl(BVQBZN~C+6TFCcH)+#^!hYl*GD6nN-Ba(*Ke38W=X^bCk z7ngmJ#d+l`Oa-pCs*90)g9(Yl=a5(OlrN;=%2S{*2O@Fs7vZxAt%=%fQ4UnWtUb)b z^wHWdfH{?DG$x$iUnFFMMXJo5kf@Wz;nB~tIB9q-t$#EUt)G8R<^kK0*refA5&j+2 zKegfWEKZqU1#YmI_$*ngWE+rMtJn;`71x&_ycg2j_M0&dH}$3%Pe)SuDJJq$NaUvw zNtuP8)5%E=l7>%#zl#Kv7YdIzaa@61Y(mmv8=1w*)CV}JK-_~GzDoDXgU3#*F`;J5 
zSU8eP_KvlAq~VV|Ih30o+{Fjq1Ud1Hwb28+ykoPyQqfquS1uWApBZfRUqxQBIlpHt zO!_@i=FRYVtN%3e5N5&QCBTv%uecwHO?e>&EhE3%k+`3Rf{7)9!%GuY_=J3~SV9!b zFrlE~ON$pwp2amf$6pR&J5B^R*({%ECMP>jtVD2!iMA;ayS?Iv1!7}fNcn6j#$Iuc zSNt?Q31cjcd27Q^F1XKsGipw!z+XjYc{qezElYogq$!(`G^S@CNZuiGbM4^+)NMPu z$s<{pY#=BSqr2JC#A%vTtQ4Gi`@#>vxJz0tT}uAi6L}1YQ6%m!5JPz(AJiz{gx^;Hj`y?#tq&%+m;{t^O<|T7M1j5QYb~6gMJy>h=4wIHk7|rJiya^UPmaXwM`aIu~rqBgG5m_~)th5}pcjf!VB#*xkO-~S@+`(}#( ziBO(3NA(}4laIG1X@83Gdu^VV-Oe{mv*S=ORGAU02g%b018kFi=*5xmlvj!|j=bE5 zeqFTGsNd@`o7_2Q{Vl=J(u)FJ*zZNM^@2a^tot(w7Ve*pW8){o$S1xG-3TC0v&H5X zml3!QPQ@6%=X)u#Md6Dv4&Rk}e;&VL6`c?0VC3tpx2_+`O))LfHRDfL8ut^HHd)>4 zhjJ$6;_E<#y}O)A%Z&?quw*^_7d?ShgaK?txivPEE+S1WB0-NpsThxo|5H@{wyhA9xo!CAILHJ)@w1OBaXM6GL>$ zsNm23i&3*>6!q$TWZ9`IC9u8lIG0qDqtM9=$vJJ`B zjz4K&*6+@WMU*18Bms7)SIAaAV<`e0%$Bi5BqZ*PtkBI_fD1G zx-LU!HXzxO#U>FY%r(DOdGH&m;OM?U1DVz;{=Qxo8(yweP+x_AX?UrW>;*RdbOXj= zcVayHb6h};ShjK;9i86yj<>wn_W66pscV3?=zdgU=V=mslV(fL23s|+K z_2W2>i`bF;2dBH%H`hMr=yEK)83h`gwO)%eMg?>Lb?!ytHgat^b{RUe1<4+ZllCZjpl^|otqi** z8q+E{baEtu*{_-i%9o#j=zT_S3$J67(D&c(gMU3-viWBzSnKMBX4mGW)A+j4SvC9o z?SxbF2;f@0glyegoe>MnlC3iqWyIpLwD}v7Ghel<3kb->w7gfaE1#Wdm~x2B9M7R6 zg;XMxBG=w!g-DH_qcaKmeTiUKW>l)~9y()WYZw(s^{Oq=ho-~;61PWQRAv@p99E(! 
z)=L*vrSOFyEoM)7QJ>6Dn-U971T%!OL!=d=LJnD{n(gkow<7O@~GozM>Z7`XV- zwHSlnF*$tmQQ#0F@VNk7eSPguTNWovI(UjTcuF;R<{`QdpKyz&HKjf`Ax9C?IW_oRi#k>z5?52< z64$X{3zetJvTHC7-`R(K^CXw(pR~5!lBLB+&C|fF(7;^6z*L(DNSS~Gu(OyFV)I_M zPmqA^Bf|Iw3%FR%q5zCQbH9hzu^0yyE2+h(Xr8(^XJQ<_JaPXhzhV(x3Z-z&7nTqd zRQZO$mS!4T16?#rjXGjM6~PeyK$yjI(B65ST~kC9?O43Fo&0!({m3Ri)^h3sv!r5D zLWF%3-i=fjKt7ZGRz5i0L`xJRLqxWysux10LL?Vsu;Bi}LM0HDK#jZ>L23P(Mv|!X z)_KXVd3|8a601NGqpVz#0kh0`%1a+nv*mA$4is&-jiM`Cm#$1Vvfi$28_gCqS#Q4z zLOCb`*V?7wcRdK+fnPBX^$5bxdd9~P<7ZkJLjCuh zI*qEg7Zy#{+Y19YgS{}cC>nDi<`|6~)m|t`@~k1UO1WwV#^DQ-?AN~Ig!(_3f>kCB z&?zqmQ>MN zz{~vvD^!RA$IOd=1!;YgHnH>Bfk3#U`taZoCor(+x*AJ5>p{@BQYevPyF zVAiOq1V&Q3;qyy!cxGSxB-D1vD~;-I5ja?w7`lJp`w|%GTq)7}4l% zLuK|)SsdvXU8nosLX!0n`t(uq^v`*%NNkBbpnUAXIPAa) z^W$&1g!xFbML-O{)HbBzNjIoGB0hg&;*$^JQ{q1lNtt5+nBikfDAuZ}yDnjJKxWjk zHM%f{J>7??+&m>-Wbz|#Xd?62%~1d2H)nB?$2F1pD}DM9d0Lyd3yC`;%avVMU>xo} z0l!Z(CHnu_din!ED~UyhA#I$LVDZf8odOb_IUs>V4;x5yhu?{$OfvxvY ziR`KtH}gv~LVdg-flwDt?ujiEGD5wq5^A__4oHQ6P8Jt=NF~(pyXn(%^7MYXtPP6^ za^|Mmp+a_C+rb zw30b}wt-&1qE9cIoj|YED^+^kHLVYN?MSLm1{w5;&ctZ;2(SR8e4N&YYN;; zgt0p|1)i=M`a74zcca2sH_EiqR(`(G(2Y881Uc%u(b$cY3x0)hcw6fD0)E9?TzotmUuUhl zKeoO@1I+yC%2!+I`p)h559LhC$KPPA6i&iek?MQ(vmI%efW*uP-Xm17u6ZwuldQ)A z5LoL*;_$oQ%i@?YRuczxwEo3rMVR2$d^c- zIsxi!Q|icrI_27_7>Ca@ICPZzZtsQZFXX%gB0371HW(-29C*xjkltyrgn@M z(V%l!OKbKBPAJd`>*4vz-nqTFkqQPxBsY76d{81G^5T8tKc%NMp?xozz<7za@lqE< zLd$NoWu$AlP`OQQ*$qqNq(C9Ge5Ka1alUnZ{CPw4oQ2B~IHH3g-+2&_V-Xa~5yK=^ zWIk&BLHSQMcawSZ_meS>JcQ&*AqTl8``bB^iMe3e08t;FVO|m{w1)Ak-I<|+rG;(0+%uTt_`1<{ z6zG}Auc@;Vg7-N2H3!n*ukn2FdWe0Wla5?_EAx@-Gk)cjnKXIQ%Bh(gCrvajkB)av za{ABU-}IR+Wy_;Zr=REReC7e}>Zp^i*#irna|R;UUVil^6Pyaeuk0SoDmemM94DI5 zd*hvxE_FIDbs|?Mt-wYrxRzF6B@mnsAJ?fLzcg460eobkAAQNeM+3-5x$x1w>PLUj zeRQdRa13nUA|%gA133RpCXnQ-sp?k~38mhgXf7k)@xgZ>yxV6syQ`y<_~1r}L-Ejv z#vD+?V+aJfhSjC*EKDh%Zf#KyiHZAg?8$z3#>sv-uKyouTngWri1O(E*AwWOzz3-h zbdCwRwlBSw4?LGagwba<0SklAg7vYV2x}uLVdn$OGH5(Q+O+(3pMqR_mz@g^?Ax^G 
zfl%AD>aKx{c^#yDFUIcbsLNmY?wMo^75AXr9@Oxa9S!`Vz}3R99GRh7Qh)<#p@-nHMuPMYBM{1$}1pi1eGZXO}i{u3+Cu@@qU`g!bg|!6$Su&(UChd|(Xbi~W}ac!7ZidJ)!VnETCo*I-QsJro^?7{$I6V{1&`y z{X;9zbCEy3=|Sd`Z#wh8`ZmxrlV1b5ngTr+^EH2=f6d^7*;IQ{wKhOizsJ%U>DtpZ zwKn5+KKKnsxlPpC-F)y95Qzop$aSQ0B61z&SH1-w?%-FxlFo6`6!UUCBk0q!UGxLPT zi?p#6__TA6=-IUcnc9;|;f}~Epjz-7h`?bKx>2Q2>i|Zf&;X4>Gt2ql`LO-Gl28cW z%?HN-dG8&7T*oT&kgJlUc|E9-mDVw&u>-Kxp zttcO&+`R>2iw5pquRPc`GZ^yCF8V;&>KPlI_A2s01(3w3k!S$vlArTQTA+(o-^oQZ8 z#6%K53}1;8;N<^HDWAiy`IO3bp?`s@t+F+3)ntijtC>E%Zq4Xqx)t&;8G#tuf8|9W z2)^D85~H(;cng!30T!l;=JaI~?bF*t@2Mi!n8qr3J59BT=A^KRu1~Ous??7crRgEi z+eBX{*hE*TA6=^Z$Y>MY!@5Rg4t$ZJeql<}y2TtP>GRAMG7$aGKP50cyZ<(V@xzfP z`{C&)`{Ahmf2c7kT$X4Sjp%FrwhD_d*z=j2n5a9s2%EWzVf z;MPTNN9b)1eRl)BdFgFBz5R;bCefRd-g4>fEP6YG-iFcJW3Fqc4b}PL#(_10Ey+v=Y(c6pk_6*#TuV3yk_pyG-f@XrPyzB$dBFo7~Y~vd@n8#L$LtA z0&%Ar;1_6cdgWmc7VJFe&E1Itc-Vsa@M$nny%&?8eBc;JMBY8#y zLjE9Vb6X^%$&&3vxWOxKS3`un5?vMcx*C?=>6NcG6}XNp+rylmnRkW<=yD^|eJwsA zn~azaWpz)lVPA_+$oI+xu-CVkIVf204m~o7HpI4U>?MFn{zP^+j|q%JD1Y^ zJrzitFU&(Sxiou(S>VOg;sY7n^#h5t4)e;>oL<>0%(}AHGYBKG3vw8QmR*G8LK;cb zg5Hfqq<)Gzza;|el`Rhz>^wjeVgL_Z@K(5otY4zK}U>Arj zuzdCcjQ0qmz_EdRpbmZ&3A6aR5<9N$ar!=mr64bSp(fg7>F|J%Z_w2O(l>W9`CHOt z;U|!``IV4bLmr$)*XCSD7T=6qNBEV9^`TP2DmRh`kE4=bq@C|^J(aXl(&+O{xKJ#n zj^!JoGDyf9*iz9LOXO-D{Vfv?dQ4i0CKRMIUDnl$wLM3%Wer%zAh@e}%ff|J(mwn~ zm9!$dZ#hA5|IR(_SE=#rl?54C7@jq&Gp0QJD2~tVtl81jjp9XAY6Iphg z%W<4Cnk^iv{fsS?Hhg~>YB+cq>fDSm#(Yg1(=ruQ9wZj&8QjIouaPd(`Pn<;Swn@j zCBjumwqF1BfY|;XotB4HOGg@My!_{0H8!>Z?NlIk`=P(RamlpXDn!(I5Q%N5;rr1@ z+>P$s#cg|JGMN%IC}Ysu`a)2mO)IgRh5Qmu2BK;F{iDs48gw%PUpI9^)P(FHb`+00CH{dEEhiK5a|6M{p=xyVSG+ zu?R2H%Uq58npa`O%a{;LuBxtO!g?-c2Bhb&N>Ym!4;qtRkC3gbn-Cv=&W@SZ!VdJy zg@|^}s`HI8{58hF*Z8ojQY2rz`rrUvieRmM+I#--BTL7CVv)h5_lPa9iVtZC;z_0= z4C_7*bmCZtc51=e#i04vPCgG%5eRWGRVh7e=}J(*Mo$2^XUg9m!^64+J*5kZhw zs5m?}it#(YBrSN!0P(_e(D8PiiE&syo^pLw_6P^4Jv6}tosiOQBQtl{U1FoCj51K* zn46cIW0GGCMh>|w;j`34sa~P@Z8yhpdHZ6aTN4HHx0>|MMt;<@AJm^u!+ycKm12qc 
z=!%!}u%>9$=;uOXR59gZk&vxibG}iESPZI~6j)+NE#I;MWJ0zsJrLKap)%#!BfaV( zZ;2XU$tV&21;$wMJ1`EvV%UEp*9a|y!9ddE4j^g0iIVgc6D8>@CN0GFArp<6-N1&) zkSV6{T1+~HM^#mkU{|WoCAD}cVlb1TShDam$h@z`r~yG$Qw+w3H-vfc%_gDUL~Xz> z6D+q9jWAQ(Ha&0O^t`WUir>r>fAC71ggqvXi`))I2!OEDD;_1Rn`fdqb1J-&SC}_b zJm{640iMkid%e;dFoG0_*ek9V4#0=&1;rGzVh8|*P{n%au8_^AbR5Td#Z5vjxMpY_ zUU4JHk5{DClL|M$oyIYG={Ei;^b{rK=r@@0o(8}&(iEB~X@He>7BTM=Dj@jq>oL^y zI>6vl7=Z{FU~W803W)0jH$#L*D{d0184XGa^m6GKj^p1xVB_yMk0H;gh2xcI_$NWM z9uVEf%XOon=s-TOd^90bBdULMhPYtB^a+~@8QXw%9~X3fjT*kUqdRw1UuWWGir*IG zeuV0+l^u5-oV^njbRI$t-5E2*?+O}@U*^p{SRj6c?%ZWRjpGV(KSlLoE&W%}dDv^} z_KIBvWJ=u7ZT04M7l@yt-|d=$a@z=5)wps>?oEu`9$}uBg|2!+B{ktT>X<$qRZq{jkx>DIt>(YUQYtILda` zsN^HUO3+GR4W)rs+6XKr;@bwlDJ@`YOhKS$BwuqQ{c8kYb2a@do3EKh{~E>DOd=XU zBRuN--Yd3~%1HBG(@{cvB1TTq;Tocsd2^3~t#L~l#}&vonvm-=e$Btk97kv{oGU5-t6a}JVd>JSZ&E|tMNd1B62;W^O z5FPECivrPczFScs`v38E_Hj`a?ZZC{tgOm<7L_6c#iRl)1Jhzt5*2h-SHq%2v!XQ9 z?1d843KQL3<#aq|Wo2b$Z+CY0W@Sia#i}W2nPMqwDZZp<+it@&L6AMqXReuZcFvxo z)_p(y{KGl3%zS6gyj*k5oSAF7l@x|6;;&bLzTjO#D-!l2VT)RqffD$f6Y9bH;4rQ< z&Vrnq=T6%gGw?d5FvQiQc<{937_$7~5*iUL7nJ@7ZuzeCZPYeE5_5NPxLCIQt`$oe zEtbF)Fz&A<_BJIe+M~A3iNr*zI*1jdrpVKO!)=qqS$A%~Z%{nO1RbavErMU}}h1`y?Mltjl-n@>-zfwJbG7{yJ!q zSFB&q=m3lkL592Y-rw5PtRHNnmEo<&Pr|Oifi^|Kf;){Qd+Bb|?n{aY1r6Po6j9ig zf4=41t!85CY_qgIz!?WVZOf5T4rbuGv74AQT6a_}$u=t=a94Wt5*{aHLRerx|VdS^PVj{nV zlGH+juog}Mkc-I`_9{MYfT@)hv+E| ziIhSuufw)eD3fj%>B>Z;%Dj{WCa#I*znYfF34Y-#bc#~0{*crqEI{!j3gnulMY~{@ zEJdVV|9>JsANT_oT=5-?UCoH|;roA*u z&Iu6dl;c*Vf5!j6A`M)9Tv5=x7AsC{@kOxi*wPMreB(fhKZmDUxTS9Y6;(~XdG^~c ztPTE57UVkPiP&}RIam6Q@N~`96m(nmxcsenWN^6C_aIM8Eblqu;5g(z7=tA1io4?R z81Jwlv8%)7X^rOR`yDXhiFP^n&Kd#Z^oI`UbiJqK3iqXs$>l5Wjmi~%Rb>LdCO37= zl%Yb*RYNNZu8N9OtOK_-%$zueAqsh?S6BoA*r z$8hy*!&!%`^1nD-ou{-;g>X&&8aM$%65D+u3ZwU-f3h&_$Dq=pq0$DBp(h4B@BIJl zB_G?hUh?Q2w3pl<_3wk#2O^TYI$X|u{KDr!pWVkVe8kR*VIm$ieHJ$yH(2Xfp7nns zhmZV5`kiVgqLB2vzTcihff&0lqTiL@uJ3n+=k0eDXK6_dtorbsNegn*76e+L-0)Af zpe@0(Cl3vH+CQ4SI|vOpE{fy$Cj&&xcZQX!ItzH(Lu(#RJn`W9oC^o@Rs5kSqkYwL 
zqFqh(sH)?xH;S`6?0Uo1B)g3%f5b}~xetf}qcR}L5UWBdQf zx4r#WV`1mApAgRMQ&hnf&cpw=Xt&Tn7f_MZ^+x1B7=_ZeAzyW=wJMH_RARzkdf>(0 z{P~}Jv4yfR$27N)@xPg4=yren^iSsa$D_uUFqti|8UJYVw;Y9*z~lN}yp;5M2ebqm z@pOTfFblMhGjaT)wH#<6QFs<~8O(m-V?hy1o_WH?O&KmEUB%2=0;i6U2@AQK%sPDI zIdp|-_2*l^X#9&Wc@|Y+w4x|#-YfqsD$Scu%N!M*S$1DA^^L($=@V~v1u@2V8 zkh~WkkE{c76`M8wTp8|v-VE3OZyAOk4ClEt6QN8Y&-Oq zgT8f$DB!Rrrrn2eX%`MwjT~;#oto$dOOvqkh(?DK*%g7~%(pOg6d5vLDv3Y3r%*uO8*oY3F4e~46LrJ8Ao2_Gu# z9uHUy_tlmphxap*eE|imy+dSfBT;@o6grpv!~fDg)`hE$Sqxj$6bf|qv3iF>XSL6D zS>$6R*k+zT$++c{$FYs8*?4`kFUeb6A$9)zBO$V;jWB0M`U^ zBWxWwjuWjH4TDq2mmRE-NV3j5W#iw#4xEzdNZ5|HA4L9N zdmz}WBK7ZvzQ%t7J2KAD%--m;^0^=FvSY7hJGo)qhUOhm6lE|H$T4ZlZv6Xt^;iFq z?x_7%^cl4hRgT&pM}iHmKXC-NYUiuLGU__ivj+L)!N}h*SluUh0_tp=VFRTfeqWEa zHz2><1AP_FS0fU>k9alSJD($mG`1f_{@;5bOA~T7&*=d6+ADHXv({5?i5m&^X!{}L z5B5M(*Y0Z3^l=M**Y*aNU+ytdN@|CkdrG^aOW_5{dpc|Q<<-H}nbpBn@MX*`_=CHN zg${y1LVd6bi_kQe(Cikz2Row(>_7rLeizo3i7V6O!>UXp#;9r*=#=*5!O&Fp~9N0K|$tr>{! zN`pAM(*B+(-=dj@@-%nl7s9b;&F-Ro<8L-P?BQ6%^d|bv%xtlnZq~%oK(apki;WtD zfrF8FaQHbYM2+>T7rp2V%~Icvi5Yxb4cupMYzpIq_QEXJn?bpzrN;>64++6 zx82ubX5Sdg*OE9S*}Ip;bI4g!+6iP+v7HOsk}fn#0NxAcqNxbbb@k7=_77AXhi!&Q z9gsA@=fdDd-mC77&Ou^vKJuL^DL~@v9OT?TcN7w5BjjwIGZcx#ZAk2n#F-9cIS!Iz zqG~6S?zKZzTVwE32B1yqvy6csm`85INx-246zL?kd>LDkXn@*v4g^ z3N9x5X9p;XB45`OQUafz#a`6N=*ad*+gJ{1XsWR-Nd|4tkDBUHHn;i3fxKtB1#Tq0 zV+L&88;C0=TaM;9-qTF(D!u)I%(^eY@_?r`gAkHR2j?&RdQA@04_#SG<50{~~72InvmLoZQ zv5wsyLG7Dc5g6x&jf&gJh9&cwlFtB_T9nLEhbLk;NgA&}ReS280mKcyU$+y3e6{%N zNj+tj)Xbqs8lr&l^#^bgQ<>QMh5`GNEfEbP06VF`(yy-$2{nPb2>&|RU*!*6(55Kz z_m6-uDkRQW;*2EDC&YP=IByYW>xqyOc!AvmuD6#$%~)kXM!?vU{sby11nw%4lcJ z43}qPMovY^MN05(mrxUY9r;f2&sGP2g3w4Bt03pP8Oa-SFH-uF3W4)D;X{m@gR)#S z8(wPsY;v^E^nKFsTwoHl%>}C~^}&Q($(1p@!X2%iW)e=gg>}eRJM#)8%}|iAiG8DO z^aP$;ihJ6%{eU~Y7CA!?uMPG=U~@vt5(q)utDXKC7eIm=a~(D@n!7ikc}F>pt45P*QTy*<_aAPkDOy97C;h1^{DNPv)RYXN9T!)N zloW-0b#ule=laLTqU{HeKhOhJg@>vdG9UwpC}?p0G;BDTWS3rh4CgrV87n}(y2q`^ zxxVZzT-a&BO}@H0Jnmr96ZoY9*MKHhBW#B)ney+Ap>y8P#xEEaWw1Y}+EJfsrofdv 
zfYgiGi}BynYml#cjtx0C&&@)rs`aV4M6+A^+x@{LMtQE~R4eFR6>X z7n+bxWrSV4Vi~zYjXA55UXP|#L(9?Wlj)n$q|LPEvZ%gBui@9#8N1&_D@8c;S%kvp zb%cV{G;AFa~nTfvQ%E2YQ|8_TDer7#2^Cl)*8LX?s7qE{dED--5OWP*v@O zPF%}89C{}v2-}hW*G|YEa8O|~y>2qH{DG#`zPFg;Q2G{B_0)FnC0E7nodWzr$g&4* z--GO; zX@~_X*`2-#Nh9Dn0}HY&#u7k_o%cM$ahw>l#677R3EP9~OKeEk9heESt%TmMA4PR3 zVq;6s5OxP#IIzFP=1yOyR=jW=rSHVU>e6Z?Y=x_=-NM$)>OiaxtRDR(HWYPWSt*85 ztGPHlYDLIXdLIw7CMECbPcE`1yXy*49bc5sYIqe{*5mQXAMA;m)`MitKw`qy76*ri zATUSp8Q1jGtWM!kBZCb5%-zb?S8#a$k?t^TMXqAFRfCTab`f{~8am5;JaC^0JJ!N5 z<*P2a9!d6xAAq@936CN!>6EOSGPFCdykpfHtL|Dr2h8ABBw0@mjOQXE%K11W=e*Bk zkXe3UeNWMriZs)A^fKBozcCOx_x*MJg4?4E`h}Lpf$(w^z0nr;m2W{sEhJZaehQxA z9P;VG8JcT%i#f>c+{7=MOv0rhu{Z;_I`2kUoR5Y-eoka(US_rHGgT%B+qs0}ZegEG z_`{vP5qUzE$Fp3*335E+*kHG1AKD&rTQ(x+M&7dwjC!0v{v$C+?B5+Zo67v7h5FIL z4!5v1Sm|VRhDFLt>f3!B^%w5?x}~L{flo zQ>;*ynp2TWt^>(;t2a8S*RrZb)HkfJt5-eaDS>kf?`gxY=s$#$>Vm|QE;y+!4!*LW zOCl+joD8fm^haWJWU>XRh-u2YnP0u#whZorQ=YIAW@Zfcf#1y~M8rp1V3$5cFGF~g`1D(}A!7iwiD;xqah{W zYcrqE)BB%H+uw1p1_1ZX^WKlRa7kw%F(2#kj~5~lIgnVG$XDKIL(VJCXmO zm9H%5km#)|xf)5gD8U%~Kl0U;r6T8^8IS10lpZ8jTbSa()tTeStius+vLNT?IX&1Mjr|3?|)J|95j&3x?n8w_#N~XtWJF8c$;@` z>2DizQymsO!*+0y_G|U>W0uj@G+9Tm1`gP`@C;NnxLv5}B}Jr>OV)=Evx{h0>5-TH z-WEQ#O|QLD?*gM2A(j3TQUb+B`}*~yzL_Z*SlZ>0IL?N|I~>T<5;Nx!SkoC{6YY=p z0RzK#(5q87j!3j*R%g~CPjk$iZrG=O_1T!4n#5@X6<=xLE5};+%JETrr7POskmx_? 
zXur;X$ku*G``vgBkHcHE@R`+es!dUXuOiR7Ge{U`L()yyb%@DldUxSeGbEmaF-VGu z_w6mUBO$gbhU2)vLwKHXG*(f9zsu!^LrS3IIpjIN;NRr^BktcrP>{@hXCuP*ov8Omn%0S& zl!m`-A|atokl0#_;eFOl94A_`3nomP!uy)-dhk>V28^f0!AmdM{}R~2`>w?yD=psY zlDI+rN^FC!=6$I&@=AL5Q5AJ-IF8G#4qi;BGFYTUBVkcj_*gCtv+(1pk*B%coZAqr ztw}V4SRk%CLnbZ4m zHHG7Eh+<2X6vlE=7*0Pgb&flK8D!Ss5abB-QhEe>5%qgvxA;ace2eC^WW_}^DO%oI zlIwVBV#o6*scmbJy55MkAJpQDt0`|S;X8No(w801!>h~On|TU{fb*NeYI3+ySWf-q ztn5ber5v(M&hx0>(^u11SxaAG2QMk@|4NcLN)=SX(P+0wLc3|nxM>r2Ih%Ruf%D)n z2<$bT3v$?Yo;)denbmG;Z%B>lrf=yM--HOGom+UR;5?+f%-X!nx?H|;Ir#IudwJiH zSaKjAPL7k)2xY0oZox~>_^5_k)N9^WlS5MEKFvjRHx{)u0dh>T-}!O)=6IXjWtA$; z1S(Kd=ywm;d`tF6W$witkABinHil&{680eB1XkRnV|1Sggps_oF~p?GnWt&@LzCP@ zDG{iRF9<5EBbw!MvJ*>iM>}vcPor%lg|-pEc|sOmg1t^eIHu5&ObnM~2GmJy<|z_* zheiKuj`tMrg++6zBs$Ss%Rk!=rYmdUUg}QmNt{2aqnt@n%Rm{c#cZe` z=W$;0QbDtVRu0sl{%YJBoC&B&OEb)n(zhb*I(&J5GqZ^x5vkYbX+@aZ#fGFoT}125 zUhs*&Jrd%&1O~$X^&0shPEi7VVUl8PBbWRfn;0nu7f~_jH5G$=W&5vi&_E&_sT-o; z1_>pke2)ZLeAHy1=col)n((irMI0wFYe)vlNCxizZ!>U32xnlpmH{<0I0vq#dY8X> zu*=h6aao%2uZd(_MZ#$k8%|k^)+V?*$1dOc_cEbLW!kZ9Kd~L_^Y@sJRYYn^SVXsc z(#6`s%)P;`Zs{%yRUP5edZAl51k$j;LV8pM?NM23kJ|rdWRLotHl+%!N5#OlB=E*X zBiLEu+{}CG+L4Cd0PS*6zg)g@lN{r?X|wc;2(T9WK(`==N@U z{%<&6nbbMYyLSlxEIqF1TsNl|@3{|irQGNa>M^F_d}V64Jnv!tSuW5Gw-Wk$zz!Zl z3*w>p5psFZy%YX!>01Yrm>GXJrmv3o`QT&Y1Y&xre-u47^A#cB8C(DOjk)nQZpxl-rY^6j`RF~JYv??}VBoM<5KfaK62~4{ZG5 zRsesAm7l+aEFs|(8##s~PE0M#JQa*_NyQfA+{a5_!#r&`KNvGc?AqBaj75B9Nk5xg9GmDC$2;84 zoxEpI49De)3BPv7uk}bi@*lCfE&K9(byzMvlpmO-D2fYqGs!z!LyBA27MP5G!t>Hj zF&sBq*r6rh+&0HHTG#`3@P)Gz=nqSAiARcGn2xNPJHT0;u?9xF71J&bkuPq=90 zn_5E(5_TbBUEoPI!1pvjJ)9RO0Rlr{5nw=<`dDiCO1IGp!I8kqcZj_s^>|x>YWQj! 
zbo4+4e1+T*c!kKJ2jM;-d(pVR*Pcf^xAO~Qqp_B;n_t)z#c`w4x47w6$kFMWvFZTo zOw%5hzh&^~o||uwlDgsmpqdB=0~Jk*;&$%h7k(ebac<`(e&N?q9Op_0p@}K3^p*&U zt7pq-3!n+4CeVC}LYAqJit|C-J$Jb+Fk)ax;J#Ce0*0_BN719x1yN*`o4yR#quqtW zPi01!p1bG(Lp1&!Zed-Z{!e-f1MCBE{=qN25Kud@ibPQL6iiTbgWV3d6xYKoEK*rW z2w~g>fUD;h9>G65xA6;i;)nEgSVIDRE`7a$wH`@bFH%pj1?HWk2)O3aS;H@U4dVH6 z^#VfJXHKy|Hc5v3NN5ava{`)64!Qm)^X9+TT##p7qn2%HEIgAG)#sKxP6J)RTgwg#BNV z;Pf5@j#KYJ$W6_WAKgpzQ!u(p=0C7v?oKU6Qo`D6V~J|>OpsA+ipl*$JuZZ4#SQo! z$y`77ETw@Y>(femW?5w|NQWizdv~5uu#6}sGGewmJY$^v{fUqgd|x#)s9hUw9e1tz zkw_{e_k(w|!yh^=VayGJ(RN%u_(8t1z9M7@MK)rg8dQ#+zb~8CcG-nw&XT(xuLe@334)|ZS zI#6%u!_}}XAzTgUeAUn~WL6EA7MWKA#>waps0PtXiVDjvN$kjRnYC!%>HJaFcDxT( zGCj>5`1z+hf>1k!sA`X-38&OC%yXy%@|=$1r9mXNr#X(F?}rG5Cz15;qRH|;Jd9o88UpWRlC%62(hXA!IG`w07s`pKOmqjy>pV+T)gPJqX&L=U{i2r!|U~`a{S% z*bq2>J%k+J;A-0ImL@g0{H=psosHUzg>_ z(U#3ONCjQ`;wi(z-Z)754tM%?mv$|PyXSUf*^cW2K7cy6^S*~d#NrEC8Cy$^bkRi_ zuz=|n-lTSRmcz^d>&s+qjjVaQg)6||mwNSq%?$@Uo(Y7si{S=nOK;3N`7+uf$V6;4 zJwry=fxT_ylou>3U;zUc@@_l&~_B%A>izF!E@LCXa0KVR@8uK6&)mX)}5B?5*bV2;=0c|A0J_ ztUaDk&vt?F;j!H{6LUmr7okhcfq~B6Z0PJd%eP@$ySTGAZ8?YR zzozbQ$o}LVjO@=~Wj|RyO&F9x%eu>9rmV}gvR=-I%R1wHWz9F6m38_A^RmV`+5MX; z`(;h`UmmT={^%G@_S^XRtE{AnLt#o|o3V733Y)2fwjmG{^-&y`S%=FS8n<%~@B0c& zAU1d7=ig(6m`6y=BQ$0=5>uivC#Dubw1u#ANNe2F(w(2*6GwM$<`*Us&cX2b&|Uks zVfh1zYf1NTw(>rhzYAUUeV7as-~XcZVcvTYwbw+;A&H;=UJUcP9{*u1WIWY}VPli$ zKoasaNAc1oY6v_R?tZv`p=yDPoVKK63KkeoFJqcQgW0H*q8)ZNj!N2k1C);1TFGX` zLL^P>-kq#O460?D?rwe4eeCTjOv`6E@ zjW+9ijdsGHW{tLFoOz?gIQjX2+GwHZgs>H8vff6&l8UsP1#jen(Gt0N9Hqt!t4`PSFw`UY3PfYC^xbn233XhVp9$MFn~{D+*If{Cc*Q;?5`gcdd`yfN&1kfB*N)WFU>o zqkM7zLQ43s53SS%`$8e>lov%`TC(J7K)tk*L731V}M{%W!*9Cq#r7Mn& z-~Nq8`~G)YqgBt~&~{7gxh8zuo4(>3`xtp!p~>4TI)vXc@qBOj>K|rr`B{PaTVkC2 z(-G2i$wyH!re7cfHja};Eivi(!0#s&Mg2R!ChqFMrzc4}DPbaeA{Uq>BL9wH z;wuy90CDyZX9sb2e7;i19t+&L3OVicLjXVEoaIOhN- zyUK8aCG2XibtYnUq4akWJ_FWr26Rn|<$^z7wmnt zIFW6EdzG+_NOjDhRTRDs!?&0Bt<{gN4O>`2)tL$^^$Pw(#9YCjBFEG+UkwYXRtWdj 
zB&1(PMHIJ(k-~_`r$ttfPg%4b#)3rRG>K$C);@gO44v)hGmhiQw;T*9!40~&!g3mE zsWFng@vD#$c&bg;-^GU_+4=`bDWzA;AJHzu-e)QYw;4aQiP& zZ7Mj(>Qf?qkgZ=Uh65kII{ry@?FqpS9C)#k)EwUXB1i5>Beww03vt{Ej$aJ7d}(j) zO)aM-v6A{LXlbodON+KnC|7Izf7;Vs)-)!(`rc1l*i}e@maniVb(;f^RvpKXfdndN z-UeJ(@NEKWMp#YYxKYqpH8d|I_IIOAV$W;AWx_8UMwPSKG-@#|Q@C~|2TA?A;NmR8 zO&lR-Xx22h=p}{Z4Dk!${!pPT72(-B{?#SLB$D@|>&kQn7&scD^I2atE-gq-q{&5= zeNkX8+0?uKv9R5csSZs{fDIe40F5Vt`=mC-O~gbQQKcf`^j(0Qr{IzKbGJ#6oTbu) zr4h9;$0V|xI0QuhkS{_?pzO@q*0ag^U$S4l5DWi&Ed2A4WIgk?Exc+9W`Ty^O*R&! zXiL)$JOzT&>%Q8OQOI*TnqCH~9a|EuD}HcrNVE&!Bs_tg@B|v9+5@L`V%1k$G8ApH zPP{&zi{>~^v~T5LziJ|umXImypzGs7*5o74hQjqo%I<@IkgWc5O0SUh;yMWos|kD_i%17pO0|$!g7x=d4hy zvq)rItk>;XN^446XKs*XW{2=GvC>%ClY<-aMr%}CXU-zu{HUVH#YKuD2R{no z?%4TTrXaiG!dmyZLKk-6@RnfjnHA58b}@wCmG}LE#}>ibH4EOW@f#+37aZLa89fT3 zyJ_@iN%TLjh3Jm2ndrEm%DIuzcU+63+iCPX5`E*f@to|sv4!lufa#WGulrETIaSgW!=zq6}Ep&x?6K5fV)iZyF*SfMuO-KpF#;6xFQDqo*t%YV$-_%8g>(v^b zFhor@i+Z0i>RZOB2Tz(rUGs(^!}l^#bB6Lb>f2^fmDky*&|j`%qQbdL$$Fnz)b+-w zCj~X?XI$ZkW^x$Oh9qkr4M0z8+K^bx)9nW;;uqtt-g-lVh zkYF8mHMF9%{Y)#m7(+sj={^Ap)|UswbE0)qD`9}S3ctoBLH8D~|;Gly+S7_HRo+_ z@!8PcCVVV?1yGyK_w|dkxVyU*_u|Ffp|}+(Qrx{1cP&t~xECi0F2$j^2WW~G2}J`0 z`NHr2&5&V+d6s?l-gD2nyScl2wX(5cw|mEpQdJzYpsl~aOV4{$$3vm;OGZ2`cP9?$ z39(J5fasAoDE(xfiAlP?0L!M89L*1@4KVr_bQV|$^%^AYWWSbD@V?owD$ zDu-h0lJ8i`Z$@6(bsZaRjm0Gvk10oNFKLEp#w5bNt!7HBju&l^s?Cf<57|~0{c8m& z_PYy1S?kwGdQE(NAesj@^RUmqQ|@HtFTxe>lKSE*6Z`NHZ!f?p0}DGM82Z1S z7f{EBYBOa z+yS){3FD9&m>^%a&I?4HZ8gwvIl_d_gu?Dx%DyeBwDg5`3j$^tgCE5MQQFfaZ`B^; zQ0DuQ?plhia^D%jdH!DrliqvOWb`z?XOvWUoBETz^ zdd7SkFU={^D$*o_s2Y*l^UsFW%cS=bF=YI|kO+(R#tE&q$C!s6e5XHLl+(Zh@N{blsK9wzs-foEoR zb2W=vZ>IH#a{puG51cN#q4sivga#vuFTacLPBrT|U#yRPGhqnPiNTZSuel(K#y{yY zVsaStV#UeE8*roaS4yV?{!L;`rJKwGz`gXg#8fELwt2FdjuE#{*ds06>+`Zv;b5gV zW=F*)U(J6`^ep#jL7&<=cpEJw9YyZKa@6z3(|}ZM*%8r((G$kWrxX%GvUs!QFQ@kV z%d81>qc^v{acbn+BI&q`yv9L)Xjym3*OIHEEWJuB-RhQTk>qYQ4J5ki;;5*q!GI_U zdFH3OVDAts1nW(O<+R>ItdKp@yIN?)08;}Qq1u&%4;aqOts;&?2JHG(#jq`O&0`zK 
z$hRb3Y&2YLycNY%ot2DE-AQ>jEz61kdZ!P147!kcmoevPos2wo|Eic3fwWFw-fN@1 zN9^IZNTTijw%=>crJGVwUCvFtyHJ*)$7&LDH2?U|uTq|O^N%-%#$~o8sH@gDBL65~ z?DNlt?T6B?O{EO6nO@6lYSb2ZVMP^OdD*FN2tkuf_xNkPzW+{Q98E%iqA%6PbbY{J z{)>mY1R;(34-ENUaX!VAawOkd0K;s6M|+USl%=*1>%663TYK5*Ug6%X<-QfhX$6y~ zTHNKu1_O)Ef*)xx@Zn1x_s9)VX6=NiSENAm>V_cKd0NNkwsswhcY1x}Lq?GydRw)! z#>OcM2=&-~&uo9TwpR2h${3Sm8TX1AWG$4nSA4Bu;y0=)>4(Lk-o($`mn89N3Fjm6 zUGkiUiS$b1)p`v?S_wvE=xADWAMHZ4HbE|5Y~(@xv7$$hd#~Jp*~C35p@QiBxcFX@ z>1w3qm%(3fBle5l!{>qCe|cskEPACyOTGX00l(a=Zl4(+(+^boKDU$f8=0yjH(e}S zN`R~=IsJhdT;0i7KKoo=Yn%*YR44cy-?;~>iDO+|sK_<~R=s9tZ z>5-o6QFssU@VSui6FmOn!Qv}TILbyywE0)ULawZ{r0qZpaHz%YWp;Qze&wpO@3w?E zl=cUSJzB!;n|cxBrTVuTtmQvCJW?RU3{dl5)80x<{DFaLk_XBAoHA?aQ=Ste1(*S| z(g-2QwoLiC`^~vpns;937D;U79P5WJF$?{mhjkA_bk=f<%)wCIm&AhnNhQX)_0*?k zMhJ;s)~bC%XX`zTVqtu{IdPkLUM|NFDl0GTLxK@+v0E41O&q_>eMA>%YAd;z+bQTl zpvei<9TFjNWNy}YDd_7rGm+A1J=XtILu~BvAoQ*M^Y^lj*aqCMgn&$)rbGo7O-#G*q@}^vvFO#@ zT3SX*UVD4=J+MAfJ!G2oUp9C%pTb-k4z(`N5tl?oSTrm-J#JmSLp4PEi!G^X=@WC8 zmaAA9u_tDVBpxD!LSm$L zdj!8R4{*;9iHx4}5j@}94ejuCcVId|a11fFdl`+PFY#0$1%rF}N6 zrcm~}1t)J!D6pJWMbX*vzrwm#nY;!UyG97Yd_Cl4WB1fGLb-2Nx>ULUe9QhuQ%N?_ zVy>L$Iu>BFy6Vb+d-rr;_O2-kEn2}{_>2m*s;Ys`uk81|7;2IQvuUwWIGtwrC{ISc zQ6XlyS#Cqc`yUmRk{|Qd3;70Gadm4tsxU~pRN5-*`mElhAwW1OmB<-u_c44!nyaQz zxVy1Xm}@X_+wyP)1C6&dlx09Q=#}5!T8|PGG1$LE8AgV>#Wa9@M%wDDV~ZkEEZ$Ac z-Jc6&bF9Hb$n$p1SZj<`!}WMINN)FesQo;8+8w_!{s!f`G1Ck8WQ?i=tq$vxUt8GWF+)^#?@IP497%N%N)c#IP@c zNd6Jit}`yIRqyGt4?HpjSHuKVYbyR`W0uZ{Yuo(!j{>ZzzL~s~Y>~vp1h1@q$NAI! 
zdH26k)Fh2^wf?R%e3t*Z6e5T-8#T%kezv+)Ge*!^Yh@!xT6-xA|I7n8p+9{7X=g4o z2CCW(V`ilTrM~DuMF?UQ_-+&c!K=!yu!OhaxyyLspyI(b?S;< z zV^m@2wV9*&!{=lCqj{4VUP%>E)IEF%PfE1rlXFkY3S**+6CF)f?V@6eOs{hoznElN z_aEfE!xLK}-G_H>M@{w0e?EiAW@7@jBik7Mpsf}uuv*=0@O+PyUmz8DDLNsw|!wP#_{9X1kMc?jIF($TU(k1Lr?;&T?9HeXDsV z@pPe89Lf1wcI$7vvtZ-_%Fg+s4v6ynLQ~?;lK7(uY+dz5tcW;E!kpS4!2*>bGaFb6 z$JoOBcm$=tGk#kCtK#_joXRoYp!&VDQkqT=d0VvD{lVOB7VZ3YJNkW5s9=rc$$Uy| zf9>8MT>83`MxT=DHb+*cN(6C+M%=?+^^BjbvJ{heF8CnUkBKIQZ{nTD$U4*0czv*9 zx#WJGJ`AFi1Fnzdg3Zpg+W-(WX4VYeTmUO$Gqy{HeW$miT)6nOEu7ChgXx#Y_p4-; zT*!CtQ{>(iSurK|v3s-Os{`+u&sZ0iG4!&?W$9zvsxOx75w{({zea`C3VK|g^PW59u zFY8=ih@ilI_=vKCzxH@wM<&6l$CM7l7w!?wJrOk zG+$U{;ho-1LO=^iS+zhpDvSX!Ie3J`sNJ7C(>+4&C(`6ZOw`v>=eL7)wkgNoCm$TT zll|VdWbL~y>r;emD`jkt{2cYZIM+rLELGryktc^c_RBnUY3lBN(~ix07h<*>a@}}R zOx}I~hO>PF*2h={W%nSvY8SdZ|D4imeMCMY^I7eX9*X%HF$SvJ6kNT;*yVJSg z-f8s(JZV`mCwx0HXLM*%-jt zCnjd}N3 z*iZboXqYlu2!e)?8mq%sbL5{t(?x|Y4UqSG@m{o<25M_yk78&RF*PH0Dgf*@FHmOo z&c8wxCJ}{x9P8dfq+XAhDjH#y@C799H(?VTc#7wi67}w^GWD-0Jxy_wcra0a@X3o z7+S$ixNHm`v8tGD;gGQC=sI9(sZg#nJPN8~d&=YKvl9{O=>^%ol-?8oBGF~NQx-qd@cr;USIy(=CLSw!jaj_iY*O1;}qjhNN>M3Gdp*^VdLz|4Ie&bHFq4pQIK zvX%)-V&4ZXv#GS1{dc27`t(@B#k@`>F*_@R>g4$FTa5fzW_8Wmr|H?z98Nk@GVmmS zx_)*pgzeUc)B9=1$M)*4>GM*=d)X5v=Bm|e>fJ}Qo&OepJn|qOJ%tVR(39iSAOX8j zx~t1j;!`YU256rmPbHPn0PaYFw0TQGtfp3kr}rt$lzuaY(#tx*wdjpo3dA-iU)s01 zpHQS8s)Et;^E+TyGZJwar=l)@mAu|;)E$LT8Xhb#>g}(Rdv*Q#KE;0iTZri3?OTF- zcEW}o!`lcn4C<~AZne`!)4UT+9tBl_Jc!#iAiz&7**mwx*VLo--q+Md%UJO@Zl_DH zB*`rLy&-9E&oRZf<)t3roy-=J;eqKM)cEDk$(1l}3zboxBoh8zg(Tlh$`4|p7AK5z z_a*ni_a2B`iJ=m)ZkobT!*KMs%bKhvnA9WguX9{GZzlfg62G}~T(o~0{%SP7Zk8vA z^3U)GLgX&|K{RUB=M$H676-c^CYJdp<7l_-CN){dzCiv4^GV*$!u$*ZX`mwb77@4nr-6m z2#0^BKmpU!1Z zlUpTZpM(EzkJ;OpdqAyv)ws`J;$DZEPAM^7a-1%L<@0%gQd+K7J??iHdfflX{KhYO zL!J#EJ;;ik_~?VsdH7-zuch43AGGp%tS74YHQV|la+xw?nw@hZ!q&#+C*sHGd$xzH zA)_KWy$(^zI_}5DX%0LZ-qYxxNOSLf--%~cq)6Oia2)?uQbWXE+?ZiAQR&uaHC95p-x+bD-$h7U_qi7c3hHAC8B=V(ks@()U 
zkD53MDuQ9h|M^ER!M~M}({8K8Lx`r}BzE~wy^k#u@yY<;C`I1T`xCC7W9<*?V#A>-%;prW-S0VzM^;mIdG#DuxXz*NXnlwu_oW@3svt~twd%n*oj2ea+@>T+9Hqr zK99uZB1o2hfu8<|b_De22oIT}#O3If1g_s-ZtU7mxvNO!1Jge#LXj{mi$ZerRU zo+LFS2#S3pcUB;8DXG3YY=HF@NweMH^>4H^vSOu)L|Nz6B0(qPhwl@KP*Owo?oD+P z%v+53wYl};&Q#s!sN(8wcAeX%?!)+>ND^=LSlrha-wbF2sjRfu|Bh0pqw^}&5F*Kd z6pyhASe4%3A>7Fp*%t6wKmSzOO_<^#DsxS-e3)G;6WuE1JqvP}SK>|GV0f$j%X5Wo zxs|Gk8=?_y>`f+0|Gx5zXqxWw&3}+mNu(o2n_l}F>ip|p<{R$!oVe}Hp=lK&A`ej& ziiCeL)!Xm+24tlEZmNUwn@!Uc z?2tvO$#A`w!1>}aaVK3euWA~%MX&auftPJN#m`sav0L-Ei8 z{>eE1_fW2ycyK+B{82`I@@ah9`R1Hx{E0@v(g#C~?`GIvqBby=tpL{UHW$>QsU85P z7PfgHK9r3bP7y)-JFduG&Sp!7I~p53YMYPHlIouNe?MI0X|5Ry+I8thgWG4HC4$F8K4ZAMJM z3+~~Mn=eVIXdnGt&39M5ibLZ2_5ZkK9VXDWwz z22-Ie-x)mb*X4pm;zZ?_9`sIlY(EZ01@Bs-zGoGPw@b0Bh+X8hMbV6wE6|Vj`)_ zF^{2drU|Tnj4o<2vpJkK6hET@Tp zp5KCZu^kX)8i)nu-(4B)Ybt^$4Yh46-GSNljKFW;B8Cc#w!TjQG)tpcU`Lwp4J}xAQ%F3*J%Qsl-$U& zB{IFXXI4_(vHfEx0B#u(@L`phjY6;3V^EGDIB^K86EF}r{e?0$; z3jUV9k-jJW4%B)LW}|@m2tqr$u)*5q*f8XYK|C`aY~MWldKUUa$@sxqS?~o%8kh^i)T!f0q0{cxPcKdx zHn5%)Og08SuFMK1W&-%44)qjzN&E$24)rW0N%V|WKer&RRD*YzZKqPWwNdyxr0YK0 zgXlt(fMP*VDQwuLp~If_ZV~yfqrR-J;H)sP9Z>X}r|7oLVz`N0b%Mv)q9^1e(bujl zQa+>oddLsL5nrC4QQrhO?495E8i?zv)j^|yK=V>*I!{3zn!H{R@R1Oq+1tyF+=G*Bwyr4=1MRUOt)dn{| z`+yB%%&vPeV7JWjVPe@5@m}r=-r>1a`=sAI{kPm4KXS(MgHy7Coug8Y?SUeK&>nFT zj1=zuRnEjpSc;mJCAngk-@61^pWSe72phH*`OHOliS?psRR#?FRg?}hOjY;Ti8 z2?e3eaznT8!=;93R6wtS+<1nj1$Hn2RO=>UlTvL79?yab+uO7_-c(y65NU}49b6|a zqjK=8AZRc)U@%EyYHTxm+M}@&OdABXi^;bc3BAOPl+2&@f`zF74aM#~iy+I-lTlsn zUu31ro6Pa{QkJ|$B?0>rxy~1hg>9)z;@+ka2^Y;yN`H!MS9nX2g z1;MWF-#nC$cZ1NTH0?mCx^5Xo@Qo8b{|*XRe?yE{HSL&3ur9P}%zO%}?6lv}SKT}jKH^E~l1__E}eeCwaoL3=?^ z=a_uQk}8*`8U!c#Z#a=#endkB?lF(I~H{S6+Hylmf^MaSQF- zaPYOoB0QSsh4=P)Cj{fA@q)WZ8`@8=?a){cx+U)%0q|E&4dA7lz%JmN^8IrKnCE{E z72UD4-{nEzkP`#e!_@=+HgMS2nCD>y9G<21GKNxRy zCZf0zpqZGb7sH;Jm+kO;0E#()-$j&cy)kzCXB8!3Ff(lryqIJK)7-t7Oj;fkjCKfQ zqx*m`BXqer=IMR7aNV<>TYM3m^#zKr-2{&MxL~tpQ7$XqSmbU)Uaes^yOqu6>0ZxE 
zR-?%_KoSQqCl=1men;3#r;*p-rQxI}myySiHXG_Z1Q2^wf!9XIqk%DI&lwm1zoZhW z{{d0D{J_s1xaR;adKD99o;i+i%7*OBhl5*-&{|;B>pUShRMed zuOK6bpx86ptL=m7i+uOPQR>M;1rRT6=~x#VFqQ;gW*-V@zEFGdu*((LJjO+E=Lcwx zKKuoRjf7B`(a|X2Z_*RI00TCqA|ICDXnk!pQCNG>JMMus4gA}Aj(l?Z!x}V~za$o|h2%H3+&MO#TStAcCICIz(e z4FD$vAD|Bfn0r&afZtt8GzU`o&9e*anD#$D6d1G!uL%q3%AX4_7FvKSQH)G0WW&k# zv=+U$?SQS*!0n`up!4s~mJ=^X{8-!D#}6hJgqEboJZRa2%!=5+!$EG?Lk%XM2B-b0 z?(-w}etic|B0?qe#VK9wvpTqjH80__)N{2XIixAH^Ylj`OP*lT;+Yzfvf_;pXy@^$ z;Ad<3zNTjI-z@kj%e`k9;fq9=bvfNbg?V<_{)V@p*^{!fp@DPZU&_RjEx`irIA=e)x^6X(LGfxIB>Q_YArtng|EtO5N^nsSlmt6RaObt}B`NKd zRW=+d7zGc+>nZ5YL|BUt@?+cqWcMf<{o)c<-B$jopqHqTpu!p?3mc zhk+>gqmI19Oxu*Du5%An2BnzA;<)g59+8vr&J(7wlRUmCzYAfqxSY7B(Iri19*> z2wE1IbkF+&K;C6Y$#)p(VVKmxMaXfi z0x*^kp`46m&p|UAzv%noxnTSg+zZ}_v~L{{>my9!@oKG*cv3r?jKduRRAH)_q6IA(Q!8kG)ckpaU}HcO*WP? zCjMk7;EM;$XW0{r96AAF1)ursNY*ciur?uHajRlyP*{Jtu`i|!^RB-_nI!0M%A1F~ zg}?uFqY-PVo3=FTy3q=8WYV=wMgRlMH&2_N-aC_K)Gz)#wmkpU3^97xKmW(+P}BH! zFK7tg?IUP7D;S{;6hl4RN$^lB`IYB3&LPcmFllUxL^6am*OSwy7ODCVpc#DhDhi%d zl>f`4ufTS&2TB>4U}q1~=YU~K!Fx&+eguA<|Q7jUO<7_i|wYSi^`_Xy~JVT_*(?DGm zoX1o9>`7f{LlD$6X5*$B14go0Rg1+hn!5Mn3LiTDmIfwV8L};~lT8kK_T&SHOnkip z&yYhM1)<6EIHlJn*&C9@9=r?QRs3KXI(|s0vI&$W7mq5H$xd0}9R_ysQx7pbO>CPjRsUqD%o}W2km_sk1luay-b6Kvl}I zAek8iFaW5OoB}ZGJs4mq0|7W$*?*GebQ|D#H)kdYn1g2cW z#-b#Cj{R?TV{6ZgDrHm)X^UKK7l(pqI9!95B`RDw!Dt zW0`>54Q{^9cLn>&Z1h`>)#-9o`|*#M za%%LewNH;Ss}g3H(4nn>X}HwQQS;VtHAvxJ`F zr`n(@>of9c3Ebe+*9N?UTVB)0KerNv+}%$ejo*`BdaQ^oegrq*tCG&8X9DPPhK zZD9$Wg`GV9i#ytk=~217Z-({r*1~L0LJcl^BYG@^&h$<~&zg>|ZQP6Y3*&p>xHVhB zP5#;Y+&?X2_kC^7B+v}cd{-q5E^h|5de98q32(9;EhTQ&h27Z^MVwDd--E(Ow+8Y6 zNWsUjx~2Y|nFQ3vYybZ2)b!6P%tJ@`_pKt8=2PnXIc}o9>%em>SGqOBK`H z4ZG8KIOW8$@|kB32;GR0W*Yx+7m@IuL`dmGd_n$#oG_Sp{#%D5>*i6WwH2O0d-%Mf zE%U7~H24?v*Du7UqkYz&;Bh-FzW1{u9hOD(q3E`{4^ruK`>a!3N5?FGoHV>p!CijQ zwnUV%)3#WlQQ4;;`bJJ@KrRzM)VWbXf<7oYI!~Tz>R*RDOAzrKKj}S6sR-6)#9^Kk zA-D3?8>~>gIbMJDD@BE~*4a@4*HxCe=s0Pck_7@yz$_X;!;?~J8Gg@(Tr9mQtX%<4 
z-#IVGD}N<`)$`w6LLuQ2e$Rp&iGqv2%GIhSjzhq==%={D3erxZxeLm8H>I|IxozdE z##2Iz)WZ*0p~!RUTy5xc8Lot(sB;@!K3t`v0JSTbI039{hNF&IE(MuKwbC4>pt@+w zxI;QCwZ(YH^yqbb&F0x15RSub8834X`kWcpm5W07Cga=;0gkC7e<_*qp?X}H4)@$r z+`dV5iX%|1G?h_&TW*vgs5hFF{@$`e!JA;ZeU=Je%A8@26Ti^5WIT@5)KOVs`^mi2 zGj7YVge?T|-)9}-pP~*mupVFQ=0v}zzjpvB$h`AuitbH191w_?jz44~y(cd%g-+E6xs-_O z3Rl6*APM9E5$1{EQ?^K9FDNi2)hKy9sY;jKT>z0uS>^C#cAj8w!a>p z#%+Bn5oMqZR4xrAz%h0dFI6Q)jNyEP5HScHtYqcI0QJ)FAOwUYu^uVrF1dWjO3&QV z99c@Gu@FO;aaX${QTS+fNU6|eiWowJD6=?`Zez68yQ|R^mLRlJK`unae<8OJ)tsnh zCSnYcTx}AiXqI0O#p1L8v#1IZriWIaGafk!rIHT+!`e(f^ygJ?!<^$Fy=N$;;kq(Z zunJNwbs?DUm{p7m>#{6u#V>5z5=!`5W=gU^423lyh1c}_O{`3)n$j&J@AgS3-K8s@Ug}az(@3Xe)5A=!kbdMeM;=2qjeGOme5(crIwp_DdBRHtnw3wSHrurRNQrj-GEOapF$myQ)?o`vu)LcIHg7lKXoUXFay(R zGbIq!Qb~wz4lB4u3#W>EezC_>CywyNSVifZM>;*VKAykA69UMDZMpb@`EDceeDppk z!Cx=b>$r!1Pk%hm+~kJutPV~cxHU>O9smo2BDz|k-fTyAP&Yf2NBO4?fNNLLPRH>b z^k&cMNj^MJ99RK^Kd`K67JY7*@|3s}nO`^?{Zu#FyC6!fe%t5p7l!iOIG!8ESXoS- z*&ARYVz}?nEfHZ!8nTvkpx;xjlOJP1 zhYJ?j#pP?E8~O&~s|Jq?Gj^#ZGCj3{8*V4l`3aTDQnF5>Nmj#Q3gPsub3EfpHTl_;N?G(>w;^U&Wx3hTRYaYP6hnHSukjXPXt?U{t5@ajBJ-kDxhO@1SyY~4bB?HeW`9+3o+Qg! 
zO6{}eH0uB8NJJ?y%guD)*DrrTIfQt57iL;wX`N~|zN)&IgfS7`MXr(QeD4v^*>EC8 zXW%pXcJFH-Z`SPa@md*oh@hoz$h|dOX}3Fqko#6* zCBCyj03h$1MI~qbW4(QKXE{J3tue>F%70G?D3VfCrun*ZxxT{Byz?e7hzpm2?^^!v z^PS4YbAcIv?gX_5+ZkJ>4p}?8qdXTz@pXI_#Yo>VN>ZrtCR$qL?OY;Fh?C#EC*s zw3f8slvR4i>8lx1SBPeJY2bs?&D?5x>p&mHMCg6?*iQeV`?5|;U5<==qaagMGV9}X zMB8baAF-tUdPZY)9&a4FV&rz27k!k@U_-$gabh9&IU~>HXtU1e$E!Eif+x}<{eNxc z344n4*9TKwuXx15iKLfuI=Dp}CW5_E0c{=bZfZ3%9VC)f4tUCpb{T59GPL3gr$WT; zk<|Yl_I3vRK778nX47Ze9x|-Q9dIy^Jja^4cJ$s58ZEDWGo&K_pfKgD+IwgiJDtyCzoUUYcaq=RC1k<4^z#$}; zNOGgf_rIQEDTlleh>4|oW$z8DKxS#rpFOoGd_BbFWAnP8H1Upbs%Smw&IqIX@k^ar zk(znUBY1kl-{9qbg8;cj*c{~qDnp~YqDYI`C`*fJK|?kPH&e$O*10xHYL~U&cXDR) z>02DnaLI$yv zNpW92)srU2Z&^*?y~V#p<7ccLQl>;$A136!V2CQhB9aSfi)n=&d_3&A;9u=6YYtUL z(I2fRJeByKw@${8?aA2TuH?HbXWfG~_NBW|o*0$p`x#PCj2+c}r^i?Ff+6_AZw1G~ zRf9jo!^S=WmHyhO%LJ>gTKy(%mj5<29t3DshvL)Tvb3-^;0q{h|pe!v=9b zF)LXeo}GyJW3-s2UZVLsCPH^%Fu(SNPnu2otxb&t9xyUwJKjK=;goEA3CH*_T_~+}6NxT(5BD&cD zvD@cc<}{yxXm&+&QQd^MTi)+G69Z03K{;OJ9*v}6f1z8^R1W2!WQ8N|#NTVAL`eIc zwCW=tkN$-+JF&f1t1L(qqKp@sKnW8k|-pb+C#$?90$LwlKQDsGE(*G%RoTv~#s3mHH!`@eu^@oLI7s@jf!!)izYq zzq3kG;$cfI2dRdB0I{b)O8?MjwFIjeR&Kmf2HzVLQgD6qqT{=!rKnmlBSvG0T0HI} zD9AcQ9Nv|(;RN=+)u{KGxk#7TEsL8mV;Yt6s^RwuEu)~q|x@pB9hxs z)X_ww2qe+kqIuU8@w;WH*_WhD_ zHO^))E&Sg9{-t2qS}u5i*+Xag=Jh_w^#^Oq0gNU1`@0hL%G=rKd=TeP8$k(zPhY>! z?AK-gCKBQXms_@*bcO#HsLk;HF+;_ zV)zUCB^v<&{(RSSVEpfzx$F?-hJS6?)!}kT2o>c@d+OdmFo=W;fW$PC!w5^r-s*hWA!W=?{%Z)k4eG8XBlGJgzpc$U;-(*ZI}2)-X$Mj4=Lws_Mv1 zG!ByHa1&7VI3Isy1(Rk*tw4!X_~YL}Ig1oKaKg|}%~49PO=~t3EEr3&R_5ufD_{Bq zqba**`srhqQJLH6l3w0zrnA++o{sl!n~H}|bHTVT!5`>q8cAWfP(6gCU!0X59-hg_ zw3x6om|mFmPPK$8SKRP5$cOqBVvpxh^@En0)wvyPT0o;Bpn-(Jpmt@~UysLtn5D&N zH-{Ca*PG*2h;r^=Gf~CG$?uwPYc#pe0t8-9bi{-VW9nS1W7~prxv}9@BM66|i|we@ z22<|_r^;VFk4zNo?=*y~Up)AjByCF?ByC4c&t2sryOd}p_d3qtnJP#Ts{FHR`=VV&P?E2K{IRjTPB@py`mSme7@(lrIqf`?|F=l8r1p?%jn(WC$Q&SQ#jr(;g6-q>?VR`%V%(5mQsN`$3- z`zU`?XyP3{|rd{(DvCm2!$tm;+?Z6i?s)R6`DGmcrU^! 
zKAdwBBR)T$EBd50rm%)~xyaRLCG4A)WOVtW`l%m!*!I=Jk`$b4ZFgC$kL(q@2TuG3 zmQF){)Yo_HPuv)dgS-f>nyJz&sD0d4tYbtMMK_b_Km`puk z0VX^1*fYX0vNdEo^qGgkb7UcXli!OJI#I2U9tBWOO7PJmYD7uSJq3tmo67l+j5{_d z^F~@{{?!I-ly+~mNJHA@vz{G?k8<-%&no8M=FhLkS!r!Bif%2cHdCB!yX1|`%Pk1~ z3ZD{6uDNYJ;R~0)<_Ac1R_^9F3IIeERy zYV^$&IzTxVL1`_Q?2himbq!ot zC$q>-ZIr!Y@9fO)A6WGYn08ISkNGMnRa){4nLmj~%c7Trg`|-y7mG8A=hU)$YktV0 zD_QuIeEfW>Fa6*C{V?yC|Gp+8@t}&#w_&od^x>pW6UYov_w5HJQT1H6O5gQd@~rDx zy#7dg^Hl*p*LXkb$(y|qYO8pV+S4#&f*gy;95|)^)TNIiS8*afbpE!S7t28|hhAgl z)?vK8VWhGwnoN2#-at9U2Oqs2xMOZ){_yOL_(AqIm?nGVJ(V~LN55;yhxHllVd;N!z%3d$@3hE2A|Rp>;7hS zOCT~RF<6?-R5`gi0)L-I-2RttzZGKhzKPkm>LKd$z5+9(Iq*x=BtVZDp$FGqiBYY* zIn3QbIBXCY24cnIO-zYU$@c2)KD;cYH1JE9G9A-B!(u|H8@ho@M07~&gl8(YPwFgW zt2HUEfTl^-F*nq1CbHe3;qL2DR;*Dqv3yo?BYmJfOlHO~t*IlPIWX*7C|z_9Xtt{-F9CsK1x_ba=t9?02;h)wnB) zcuubvaUAGiWc>0} z)=Kj@==3iSjLW-P4Ki>odUXb`w|?t#H_1U1{)&wYtV$M;;9dKAZsw3EQ`W$PF^F>k-n zU=wriyJ=ksb?kPESjTFHpXa$LJ9COVo>kZeYlX;$er#{bN0OAwk#UAKL+o1WeOs9y zw>p*P@hsA@KHDQs(#zorjs-Yr5%}L-5n7x0o9KoI8z?D~@!{dtzQe$&J$5jW+QHPn z!S*qc|BHdS>Y1Gx+X-yIj^?4$cll__aYf70S29_b(?5Qirgw}b%Z-ZX7{Sz?33?Nd z1dK2}&qF&KCf`Fz1WxArO^urKD$1boWE9W63Ody-zVXWsTc2o&7;DhhGb#|*Z@bQx zUR*|J2(VV;`4xUqcK<3=0bS#i?Xize_6fb`GVC4e*>&+B4e9>}3PJV0Q!sgwtbID) zZB|>$n2~2GCPF@Bzgl(1#Np|TJndty12e38j5TFF&=u~=XPK-`Gh`EGJTIFl)(cYGn`W2eEnPA()uJ( z=8&?5azMy+XAn|XZWfW1^-Iizog~NjlxO<7t!sw@<eQho6|H?$)zKs$^ z7*oHB9Jk$O+$}2U43ucW_i1CI;Smutp^9QAa4oTV_z@?yp;qj@Tq|DsD=q0c3{)3h z2TU!OQ9ltw)6UeJ;TIZOTpuoOssHSmE)sr2 z=!4(5vIY7sKW)*MuX3N4u@(d=7Q#lIWUdU+_9_Y?-;kh=_Jc!FB3q3|>*~$$aJeh< z|I;5*WxCtG6;tZB&C077H~-zdKm0yT#C&N6#Y6~9-8JoB`@_4_TJ8_+cvo_?f8?zF z;gM-A_lLWtwcH#>59gl#KlX>JsUqQL z_foJJlTh+D;a=+@HM{!r77_lL2k{-Hn2r2deN<;}$M zW+MNS>E67J(nSa%HM?&o$zfduatpFe{(k9bxswh)Eqsha3$pERwSh$QGcJOa@mIHHS$fEAQfr`4HKN;vaSNeZ5 zocs}D%6faZab|R#I7(8fNQEw?XxOkSeHw2EH?7awmg@8JWKq;7Kc}K1Y+4_`40J^n z1-c@uNqz2`EXw^>9hLhneq7PvmHrz^1F?*r%A{s)e-CYGi#lZeU2lfLJnQ;&PcwZW 
zFx8kOigxJ)DjGte?&syd6z4w~XpJ~%h|t>nkKHJHa*`H*e?A7hPZAwC$9p=1}K_vCmb_6T94)4XD{Ap5U+Gu-YU_VcIS+vF@iQ-6I}cC$00 zo1LM2GA%{rBn4LoR1T-{nm9I?EQV)nImF@gjepjgkp=&M`Hz?RKYSPEkFat6(;DU9 z^WXE&JX`+Sg5;L=ql}574C};DTlZSy_Me39F9Y=uhaM0@NL@Rs74^hP!W4}iDhJzB z6h&dG=@Hl3L+W4L3}0&qsgHGUBTiQ9Gh>?%LhAN(yJGC^(qKk*19J?UDEdp=Wa=+% zxhZLfNBZA$kiPZmV{+JC-pK|iRz{9nocx(NO!Y-=X-^Z#|6 z=hWJ?;M*j1O_$*p3k}L*zR9YN{x4sKaAB*oh(j!QYM_KmT>bvrvc&b=LtNro{*Y5? zygHGZK3D7iwk&b|F`-3?OTCj)%zKDST>jd)Dfa?XODBl@-+P?$M+i(E)|!Xz-}-F% zs|#h!U8g7}!dS`73VFK>IchD1L-n5pQ-<>O$nl}geQnT)ldNbV0n$Ur%)#F9pcYI8FBczeV zt?E#t{%<^QyfZ>;eI`y0vtUVKHZX^Oyco6qb7|E2xdCCvIO*T(WAaBx5)M@$$DNL| z$^aSe@h2%9LcS!`ioKTwsteZwQ!kK3jk|)15yxN6Qs11wo9LMd)I_N=)f{(Xcyyx) zqe*|iz9oLSah%9SSxmVg1b%YGUz?x>e^SH{WJ=ck501Z23+^Bq&5;qiBvHgJ!f1JV zMfT-3V2(S+iAqit2f`2vO>yDfz#LgJ!23cK|2|j6J9Z2`^Jit`p=-05LRSgA%(xZMxo!3p^M+6(8ceG8ig-2P1AVew|X=D^4ay} zVt=(UPa8Q_km9sflRrXgCfR=f6P=OIQ$~;9VvZ%-c~fb{qpk?- zE6f4r8Z3)5cAYq-u#Ok=3h95BLH-C^tSavc)C<&84~W9ODUO98giwE4DSa{bOw9Zk zdD8ghmwGd-HnhC{yhG*fKN{PAB)0#^=y_N=va2*bwyQoO^Iy&=e}tIj7+!$W!lH>> zewJv#H~7sgHCsl!TTBjj)8vpU{RD()caW9C`(+wZL)G&iB4w@SSc#Htd#6v%z$jcS=!| zY;AEBg={B7CR|4$6ZmD3MRaam1fIVno(_UdM--UjoBQ~x`nz@%2Vo=$nB&v?L~4g= z!GKPn&4M};4zk$g#H$@1OP>3q9hAkJ6uo@w-Zx*^*0q9=O9iRR!AEwG2n zu4?9ef=cc75tR@E=`c{73s!4^_2Oz0gk0z5v{JycMGG$HekhBz(}F=t>(_flTKK;w(PIG({_MOQQMJQO>YvWXDjv$pwL>#y8VwSuQPhseyZ_piB3SBL0mq z+?^|9eC{!k?|?Z*-y=#}b_bOfVK|)>HUs3x0Uy#wgyCUans;4%k0kyaLHs$`pQ9WI zOGYJJBuaQy(-Qu8w;<(H?-D5yo`s7oHU69$8-1zq)6~YvxmMgQiu6Sm6$#-#7wOqX zX&C*+ae;K~&-G?Fc)Df&JFrz#{(E-5Ym*hc)T9a`jPl*(;YL$Xuv!Z~%?&%MRd-UPbz}-(_HDp!VK#b*r5e_(@o&*qeT<$PN62; zokIKzpeM>w^G*||ZAHPKlfhe`L&000L%}m)UK!`BoSQM%bQv)BK?<1rpbW?g+yI`v z+QR#In(q-F_jD{^;3+=e zB&`);KVCDM4i#_*u%!9&!!KFl13EZgzl}#7WrtAK_#2Q}AJBr=aKCfGU)N3xrcv5` z@01rbyRMfPG`sSvsY!0@6EbT0L@c|&ofn8l3XRESkeG{o=@L3?ar6%B=)ix=<bQkg#EPQ?8}Szm85QNW7zLa25n(~A9JwVEkbUJNJ4Ik zh?b%56Uv&>r!fOk};4Z$91~(Wl%Cx??OwO})V0_)!nz!KP71@~Br*)I`u7LkpZrg__pA>2cg^3+VK1S6hP_ 
z*$<(tR>GqddMt-fmMT2{QW{Kt_ts$F*PG$TEe$~vdVfv&fr&`(F-Y$*DF2QU1L7@l zf8s5VoXQv^1ItSV=1t^xQfs8Q9hei?NGHF>#tPj|QdR2(LK$S47NHBSIDkXyL)ERt z>DI~|ce0I7=IV0cnTej+{1lKr6J@|ccb@Ua5n4U+E1sjV&&?HmkoXr?z`uE7xv}!Q zdNZt<&CZ_#Q*XPCbHPAss$4+WPT*Il8KeWt&ZGij$^OyajJDtnTMPFWYex$DRf)qI zRdiV6p!CnEC4Yn@<>KnFH{BioXry579nL4_-hsd1=6GKEzdn%s5oS~)LksRG%mk*+ z73r_;H;T9qLZsqEN%`qWskwqu*0lhTDZ71?vYX%IaHLEBlLyHkA?*VAXw%d#n!!h% zc??PSW5oL}$sn&gNAD-*aWfmCq@lc(3xqMA1W`UNq9q$IiM^=kW#`P8yBD(2WA6b5 zM+lx-w^8s+bO3X_cN@Rj@ZxPwCH!@RX&N*3)|=sL&Ndz{if`fmcFkkWW zFsgIJS=w_VZ_n3gf&9y@Ew|^6TU%_;j2&DYO{d6j$Uv(HQy_%E9IK>1TtfZ`Wz$R- zO(U9`T1Q^B&UYso4>)B5H4@rFPl@&Kt)d@gEu?;g&|35*oqMaG^(*%YTB8HlIw4H% z(E{mo{QZyNB4o(|5we7z@l4t@0$Zi`1_XKm_N7AX89rJh;uv~wIF(`JpfquIbWoZk zF2A55*@$tectwL>YrG4X`iO{HG+L-}Mz5t1@?N-W#Bec9{{AUVY6wL)Ydk$;XM0w3 zHhCM|kvLosfAkz8K0@I4ba2%ha(q(Rn${z{b93WH@@5fV=O!DUE3rL1Jcf?HeU>ke zzl~ZXkH4WSV_(MK;hl7qer>*>>7ZW(P0YTt~bLaXPTWS8`a|d*IPt`BnqWNqV+a?u!M82 z3K?RjSn2H?t;DYXnJZ_wlPg&QU)!)R21*LA0j4gN@g5f^XCB|%C|-*;vRkZ^tvoihL|UsTMd+g7P`H@LA+CaoYDXTFTbnERxtWpZpO*Nd0D& z*u{OFpFWvM*21zuA8co8W;c>KbcMQ6Cg_?NnQed#%LLXh9rO{2%+aA+Cj~+yoWbfT zLqQ+x6x6}`>O+&6(723xmSD(^2``n7}p>w48o1b ztRfcxb9sjeZgAy1;sywz<_FNL#OlOx@h~yspTB@cJVFRL8dj)EwBWqNJzqo#Mg236 zwiWn{HabSx$O=%VeJi6ijWF&IY67INSdo*_B`$;%kl}@7(^sxgEaCBR%m^Qh+U)L%+*xr5I?@A1CqxGYapy!{HQ zI6`0!m-JsPu3R7lem37zrG?hh<^Dg0icotE3WbpRtfxv_INcUwa*pMrAdxS~NP=QPxYapqTwgLKn;%&Lh9hbR7%axFTU4ozf^g^AWMoFFFLp-!4y_%M7R(He%mY2wibr;6ehk(7I{txeS4 ziT;Vji)AfpLEkMFsw0g{{-QzoNhAO1h9>+25K`w=v{sCUKkLnK9-mBlPr6B3Wf>r? 
zvJAkywuj?zC=D1v_M&R)&F}|jT0UPk^}c3%Z{2PZB&`(3R4RpSj!bB5L`2SYbQ=$w z*hm(WUNST}5OHLCy%`C%G@h@CZ$Z9Zoh!=l-U%uLLQ9Tbw3~}g>LeLzRT+gs2uvL% z{Vx_8K7Gj|4q09sH+kTF@4!C?DZMrzp?MTz$KAQZV4RHcR}71Ey~D;ncF) zpFmT%pEQ%t`^41Vxtjbt3oAxr=5*n~7v^=Yg$l0YF6jY$XP zL6u-h;UmD*ejPs@fQep7`VfZfI&(+7|#}#6a zbj2*$8?3X`k3TAx+`*DUOmVo3^z*9}2_auEqWmjAW{Q*6nZam7Vztx1=u2WaoWm~@WVXZOn0Vn?=c*zr^87U^P3xmq;>Zq5f>fcKNY8~EDIl~ z$N8{~2LGiz=JxQ%q{CZ#qbS_d$y7Lm(M4X=aNY8+>do*QXPT^ln~X2c=40;Y0#mP( z8GIv5e0(FUq4A?8gZLHcki(tUN&!#RlxwI9@m;UvDaavCdqwoILCR?fBFLQN5~mR_JO859D$!{^X@PR z5o4s-NFT{3Le~v9qEK0*1#YEqn`F52AEs~!i4F0SPt3JWhG@5hLLh{YtB}zPKE{y4 zF;kk#r?fpCX-YdtQ`(Ooi7AaQhGN7^*M^SByzX=&PvR327cu!GL?e=!YbkwB4er2c zj~9xIn5$3IVee!VF;juLP7W4@Eca6(5kkl{v@V8Ih>d)w(dL@drZp{$r1@UT?1D$& zw0A&^h)g5#cN`JBBLUD)gMPrU6CNIB@(c8FAGKDDKYyz?!{>6r)uLXE^{}r%S`T~e zTuXW_>oz)*dh%Zyp z#Idn7>oqH87ZU_cWkQ!T($eLOCbm6(f6Pv8I@z7m^JxbzYF$YjR=*I-IQH!4y{67- zR=zI>3UYKANF_x0e=6aPhyD*GoK;nC^40cP_{IN}mhQjEGA_8E%7_q{>XiQ93HwRk zaeH`bZI1ANRxExH8aoD@k-`JLisb}dy^r@p*ZtsBn3~>!Yk)g~?ih&v(YqX>tgC?( zyWJx7X5?q%{mObXoZb9BmpJd$*naew9Nq`Gp@Pz)A~IB<-YiRbTuiD6k^f8oo??C$)ibCH?X)OG2gK3mGICpse%b03#BjDdIYG`zB z!ZCfIQ(=zh+jmlMC6tdv($#STxjKK?wwUPrX~8v3qd&-_-y@?CaW{r^%xp*1+|CVEsKY6v*N??y#2*kXY?{$6Jfkd; z(JUU_F(}8W7-?M4AyuZG1`72m$F)4mHX_SR%JRG%K7=mf-+pUL-?$xhJ*f*XxcWp} z>tfQapVICE3B&yD*fu9jld11w7%W>*o*C2~ef#m~W9*uYZvQMcI)AEh%W;fe7>&*? 
zvb3kDJ)%*|HZ_a7gh%znL`?$SQJU>kjC<=$vqHVx-gFD-g>2#josZO$MV9{wk9hP5 zK`84n;c@(My&1lNPl;I_c$4ih_M=~fHZ6bb?^KMh&|?#MRQIQdAEMt>@(W_1m(k;5 z^2iZ^R-&I#&;2F}zq#nQiu`&BzwzkTpZrE*V*f#p<7a5!6g_T4kLS;rrg5;Wp5CrI zyF3?An(G(&%`WOLnbCDlIudx(@Ke#TnGT_0>DDWUMqKNddtm9{FO4xTYW0m-!_V?S zz|~mRJO5q&{6CiWK)of5%mHB#EE8B>x;P9mITP57bnqN2?2}O)sGj1?C@E@GFbECF zrbOkl2UcC~l8HyYBr435D)hnpi%&jifLTSJkGeCeBP)#or6_+it8nA_eNyxH5mIGc z`Ato`!PHZ+lSXKmGuk8%HU8!=O^zO}-f5TL8efh!UM;CN!&A<3fAMgO^v|?5EDNxT zlkLE+2hTAru#`XE5vbMz^Z8Q5{*wPN)^>rP6bWXi=EB}MA!_)eJZp4Od|A~iq0lTx7$ zi5L7$T(7&y#8?ULNR6>`Q@t7f??3l9Z9%VQ=YxT%)2|m5enVJSLU)qqJ(T6X9}v6fVaX4VP8} zOx<;zP|1#4Oe$G~5K`ZqB`%KWJ~=k`Ej|-vVab-pcMPz1c~ydcYk_g`v`Wkq>l}X4 z>tiaGsVy1D7ni;7x=ttrzqn2)1kr)Z)CK$F6{SKU9o2#@h-BL?2lg&c;^ck^&5i@V z8K=$XJF}kM+QO$fU2bdu{R?Sb&;LUl*h*={7RpY)&bn3@f;d`BdTO+VBRMIy5LZk4 z!m@$=E$tE8zQrZ%7#~}Rb@a}wMW8M0F6s$BPnouOk)kL*5;fZsH}i1~{A~ukOTy3G zWkV>(76;66k4G%qw+nkC+l9RmZpUvSKV251i_UARi+;B<=G=N}I4OH&@VU=Q>An)u-`6} zi+L3ZahF4Aqzyb@XbU5lZQ8fM3X!h+ts{IVz{Vf{7~^9LFl!%Mj9KSm)jzNyiBC~c zUbQJoZdz$>Xz9T-rs+Mo7ynC7_svb&?z2_+Jk_&%`RD+8H)>{X1H}0Ga8sJw!V4Qr z)2mlUwt>Did@(7dzrR+{W8xV>kBR&~DD`DYj|*&q9*>=c9#chjIG{Nt`JqW$4fLe( z?b)I&+#$5hncyka7H&o?Z9C`{;^_}a*-p<`*lOv>V~rSenEqi!_FBGKVXF#%#8q-z zIY$Dfx~>&0W|YvCB7~5-emx0!+@j@lo6yKvXQ}pIBRGS3E^!8g;<;W|sr>lo1@sXi z_)FV{=hm5~vAi!=*fRnF0<#FTBJdD_F$C@+@XHMx93n9D!+JCPLG(okV5;{Txl8h* z*d=*UQLJg$p0Oj63@quQYlOyf?0%uKL`OwJ$8<2uz*9DJA23HlZ!yRwh|2&7xoSvG zGEJ0?U-cGw7p$hd3s#G~F;mBzGQE|;G9NV|AlE2yfmEXesH^;`SMSh=)D|-m}Qc8*1W#)~OnNbpPLg?Z2 z?4rVMb_flyfnEc_>Zv)vTn}F@`qQ8!>Q4wkFN2L=$;Qxk!pR??^p|)DUA-J?4nx{8 z@Yf|ydOa-n|71~01W`*iDPr%ja$d5mzgZP=6m{zeCieof3W6mwdjoU*ew8Rf)mSP5 zLh#onPR;|ij1TtWf&{y0nn@o=JiycsuM(4^kIuYs2#nI!=kf8bw|5(F^L3&nk;nC+ z`<2&ou(G(`3@>Ra4K<_x&iEGW4P;*B#O%MZQ}zf2Gst%*XH;kGizGs5vMom|PKbw4 zpU=FZnbWuP{qqx`Z{_broP?dfga1@^?ztu2;886&m@}!~`>#aym!lOg9(SuSP{m!q z-XgwrVio}F{ecX&;O=L|IS}pw)_a}xsm<&16S`nt9Vh4fyG_#!|HUVV-Va&Xt@=%7 zhc1)~gk8MeL##00Wy&z$abxhk)2&aB{~|x3D{`@qInKG#I(K{Qa(V6+T@dP13^hl< 
zEC;;-cK+cF))sC9mi1$XG|n|}u{6##kYDV~sb7=BsBqMFA6tqv@p*R60v~hTm0?+* zYIRy#pF)?UogZhy9Id!>yjMQ~JAe13RCrVB#MC%xO?VldYC1PV90gzD-pebVQ^Z}7 zG#_)FNw+dFddf`D1)*+hq2`cRKL+}Kl-lU*B}>|pE|a^5$>h)lybNCbxG$vwtYXZ% z&c|F&L^Ij^!`Ox+r6QOZDO;m_wDJTNuN%;nMcei;;!7__NkWw z>zyUPKYnL}C=7R1%*}{b6q`@qDol0tKF11~__qv+t_aQpe_tWkMsY2%4TL_OxT>Du zHvT$g#+cOr=GZJ_JTP5w@9CV!T0ABiSZ@ed)tjc#F_Vw3c%hS<3HS^E&|~tA_eq3FcNG%W;p)D2b%{n8S9N z$W*_E&lR zrQ3aasdwiOG^^~$O{wwPN~2S`SKr~KsoWRpw%l8DC^zj}U+7NLOS}!)Mvu`vzXbO{ zt@w5K138%E=L3t#>6JWiYMi2Y{iW$SMZOEoa1Y+uvM%l=y68){t>rb#DdMi&V0Gao zz_MD&?@RO?$-vQ7;pc24yr1uVWgX}#gJlho!O%q-NapygCozGy*L^RZxI#yyhmSd4 z=xNca{ZEos=;D;E1pPP!OSA>QD2gx(#vBj##IgQ%8*$Q#JK)2p?6V6b!4j5xYYzHQ zs>w|&^@RqTV#PV@S|4*=+S6jT-k&71MVDNb`uo^oNj8*7cH&Y|*C#5T7yOaCB*}Kl z?@PvYm9#*YB$=I(dEE+jbf*l4uCPJdiHk1f4DEMe)8lt;Bj3+^sBJsw*hAD_>;J}b&{hYjh7}-*!M>m zc^z2NVebF#+IC_zt!*cUTX@MC(=-NO$(eRG0S|%a3G^he`E``%>i$ihd#)w>DPz(^ zjS|Fzp$X!_(B!C~uLZm|MLbm(tkxEXINNqzmL?cQm-C2GAdF-IH*&!o9n!?C^@w

|kLL4?1x!7DiTL=6IAx14+|x8otbN)S{Y)*%@hJ*MUFc;cB*qsxkON*Zv{~SA#Kpnx&u7hXT1%~<-0^wxbiQma3#0x z;JRA+_sA!Igfx&e|BW`BYCGHKp6;y$|Da}xml14NQ3Qk#%CiA`k}}+@1Ap}ci4*=$xC!@SB-mZF` zzigBzSH7B`k)&GUekDS!IKkehH;N?5+KL!4V(8_3Y{TVJnrN=ywY9Jxzq*Gg;kAdU zgx4OHVqCCfR(D|PR2kxGVfpatQl*h?L`QxPxv)Jd7Pd#_@Q-$o_kp>3_TU6x*Pg1g zjx$)7OIGtG@qQS>=w`*m^_HzhZb9U*Ta+iUPH@ILDsiejH7Se}X4x8-Q+-{={M<`1 z5w;Zb97Rzgn~ilZ)tliD&T21E>_;_T?}E(ZO))7T1aUNw)AM2(!n~J4AZ#SRC%6SY zFB3<7FB3<76PwU;-$jCQ37d#=30$M#DwqEJEOL1fhoFm9e|xu4_FL?d!FYFli~CVc z_{oHeWYq?Ustt(p6KxSaFzXn6ktlpuv0Uj|+l*Uipw7h*k8>1w=fA(b2o? z(&(^_(x0iPQn>nLr7)zkvb`d;iCm)|hhlUpf0=luJJGAxLfT2tAGK@6h3@3+qDf9* zlk6xaFWEvm^jA{kLFP5`AoCjhaHE}1N)@ovK%p4UH6f)Ib{@oE&rvPVgO0zCO`%$B z{487CB>IfocT|VT4@(Y+59k0k!0AmZ0Tu`BQG1S7e4`UsyN^lfa7aks-juyQ&#uA= zz#Q#TsP8;^6TL~yA+WYk^8=XG{MNbog;spC6Iky9$xamK7vyPDb%QRkX;a|NEXsFh zL(M_nhB-X2cerxPT$ZS4Opn~{WMt*Octxj7m00i$Z0@SmL%Q>DSNO9EBli!vbet!SSMV6 zrCY`j_$Da7>mZ+=D$2p)>lNV%%@lIL)anaGH@j;Eb+fxxP>-<#v-3xtP&_N6EBfm$ zU+lC(9_ch2s3@1tRN3y+qdNfYL*D@R!yCb3G*kj9VuHpo7ZR$12&?|7Yji;Aj|t!}Jp(Lhs0@&Bo(O;rD%%ZK*)|a0>n^jtJ}9zA2b3O)1Al1) zFqiEDY=d{U#EH*a;$+GCijqz70ZZDOBr^T1hsYEiQ1c@nE=eED&rb{!pV7hJ&eWZt zgB{Yo;9HC3z>*frz|*c2fzc7!2+TD(Nm43XQYu?f!uhYozmj(t(IFO9u4`r7 zBd>|L=m^iGv)kv&xC7pSQ<41GV8Mw0cvLEi@2hOJ(GJ7^nrg0`Vfjt1K) z(04=WFUer5(f8_gVB71{HdJ^^jbxv#>|_33kZrFB0J@##@OZG(2^na7T%f6ZvBcKg zUYo->Uret{zozMR>DP+bdS%1x;4ixzPVR+qJG#NfR$WGpo~L#h$#*7x;d{cRr&UPX z3!yaym^Sx`WI<@wnIR;JW9R z>G^JF5kLOc-fjGD85h+{q4Z!ps4rlzJGn<3x%4$Mtar3K7q9iF>dkO|%l62RkUZG< zOm60Rd=)>T19dlqK)09@+(tWb_F0%u^Pd`_`PT@oODcpCyh0C0<>RWDkZad+P0@-A zoK9ecTkOF4Y!RoA3Pt6(T^hZc+h}jqFywf4xuyW?Tna3w)Xq=3XvMyE&gjm3kvIm! 
zZ+aLtXXLV09-S74evK4Phk5Jj&G4BfGf)$H+}FB=^CYR~iUHVHJnq@|m?F=U(B$+V z{-T4!OcWhGC=8SZxq&jrkwk9oY>luXzNVETY^h<~PbhA=jN45Z4DUt;!(WsBaW|7c zLSEm}T2pL-BR2U=OuLNX1MB2oU~O>R>ov$2c1A2Qsji>CCs!}^?mXyCsr98*49qC$ z$FC(t`ukW?W};K+7l zR(beeyISF4ybO=7T+f!ND}2oHMORC)S35*1_R!^Hjt$Xo57fzT=<*@;ay?br;ujM5 z11iC;mXa=Im{iiCD_1YeO{?NX@(;lxg_68&G-W7DC?6Os(E{nJqImUk(D!8Q=y&p{ zUe-_BP?Cny@0DG#)pNAsXQ%unbe`NL_Fpr(6~c|lNVLv5p-Jh!&`fvcprXP|)2ILF z(~aPcsa_xJ5$O`6bba-DOXtLFW^?J!owz#{JQ>Yw}44q`T%|6ESUNp>H*uD@11vsb3ib9B->;W)Z{ z)}xCc#>>wR=2gfk;x2C}$9<6VpZX~S$3;&j((B)$;fArhh?(!RVQOCZ(7rH-v zV9~v*0y_^wN`p7;IG80~v)ZTc%GLMs0+;gFc#<+*R-b+N71?LeB|3PnzSCRtW4})@ zUGL7HyeVgLJwHyn-N#%Fon;}OFR2nSxy#2~KSsY*r^#>Vg3#5)P;=O;ABUa4LrQ~> z-EH@=OMRY$+Wf($X@>jJg~sPP3pVubbHs)aR**4_m}4eB=9t!5j+-av^%b<>E^H=X z>WI#wHH)s>ER@RJ#Sbj*JOn9q(B=d`ZsW5KK>7F`R-{s@6vaA1%KW;I_Me{qgj&feRfma_;^6bx3+%me0nO@8k(L}*}#U@G*~ ztEWcucOzw7ySb;+3pW6BJt`wy-kTyI1hbSsr_0p;$Wq-Z)LRGz6>YJHb<_+Q^2jO* ziLg1nfvK&}k@Q@7K+==Dc-IR*)?k`G*5ijxF`S@JXNwb{GgmL;Txf4UeuXn*U-;2F z(;S#l68?*8fgLY&k_JCAXc|DMjVM8GKT+%0FHtMbb{6$ZZC(>2d>)_ve&hg{dRr$^ zFTb$qh!8^Rv0E&w)z_5E=R4N%MZWPOrSUX@Y3FmWRHoQkrg-HMN)aK1)Muh8eqAP0 zypd8ov9jI_&#wKK^PShmx1i5o-BHL+tH+V-giy$nC~xdMNE6)-e#R@`oeG{ZZDBDj z|NhfaETP-bZGR4dIR;4od5@F-yvG%Bqh0?37xSJ{ZDAiuqpJw}cJi-c37O2FWOOt* z#Rr>M4RQyB+Mg#?m}PO9qE~!Xo?Z$1?KX~K=*t@fe^-yu)fv^^Ax@c z%#|o3jQbx&KnTpGNdGLcGl39-)wAvZ<~XE@4%}-bb>Lq7l_ffd9jw+C>`)Y?A}#|e zvlB@lm5uWA>zb&i-j(W!up-9|Hi)%AAJjzF=iNwIBcwVg;HjDXuvC!QBd-EeZs#W)A*4nm)1d0efTh<`f$Efcb#+y|Bv4w|Btyf*)DU3Uv2P^|TC$!)zJoqR(u!rs3V_9qLj-#Af@x#vS%oEzm^1wbV z@LwJ*X{JLg&L=z~7UvTl5y`zLY=qqao;odft(EL-J2wbR*cF8h2ly-anLL2d026{crVa#_bg;cB+wGa6Z0G>JJoZH1 zE;#ukoyhC69BPh0S_40k_hz^ok!@{zQIhyVDhWae<%PMpU*Mje@pq*4z@mfDw$!rguxq5(L2E|s&Vw2Gvr$p~CwSXRVg`DmUlIM}INw}L zUF-fZMpil-n4z((zlOYl$-X$z>J^z zeadD10Y27yavPMkS`E~SH!b=YJALKABJT3B-Xo&lj^xU3=<=GIhla+#p?GV4_WI9s z)IsJs>$r<+Wqi8Nor&_lUCRHjZn<5C zLYI%_yQdG#_$_j^k7a$rL$&tlM+a)fAFMwjmTTPQH9yCNZ>Cq@tnUIcwX!AG^RpH@ 
z#TPEU=h&@cdgsVuqAOQFoSXJ*ZfK_4^!m-tn*SrLS19X-(;bwd?A{}_1A{+lfv4jX zML&w`;6J_WW_vDc>-Bu2`RB$dN+>UVKvCzke%gl3X%)CoIqXZR09%94vtRQMq&)l$?YgiO)qdKW7>L-JJoR5&PJty89#~lgB>GViBY@zVKHja5$?q` zNJ-a;g1qbaT+|pH;b-eiGh<)asO8`Xu6v>xHGKRu_nS(e9*X_6o3CEf!>Z^SgCC%- zfsn6Wa@^Ckj@gM?vDaAyE%0BXX9q6KxCzt~=t4jvU?*TBu%@Wq47*MWv8UK2P8~gY*AX#r(`&I2rm_ypIh@ z%nc1Qy`CMD-UwII>%%J&obr+3ni}8^ zGD(^%{Zq$~KSJ}^T{A?ab$s=!k*4QI-q z<3&<`i30?G@dE_vSJJnYP5*WGQqC_0WaZM{ooZs#!>8GtK1HC3! z|H8X7Ox{1`ru+cO6jbN zIc}$M;LUmyn~6dN-1MGGfYR0j?t=nd0<*bIv7{F;Fn@nwV!oInxC>a4Zhd;AQhq|0 zR-E+A<_-$3VxP2Av*p+SB=8$IPkY-t69F0Op z$AEjHfO7?*s}KhF_0!gsQ)K8;p?y38=Ks8tPJKl)~ z`e2(3gs!=#25SqK#-n4P7TnDxbVnc>deV0!{Rgobguv88(tq^| zp|o5f27^tpdiTp#?`8oSi8!wQA8FqnA4QQoKAY^209hs|k%&Zt4hp_5c%WG&5_EwM zE)p~-3M#%3MDKE%2x=5?H?ZSm0F^rtqv!d=H=bNTMBD&|@DK@{@=yU$ao6|&ACN$j z`F-lqvzxGqeDB8}RHwJ6ySl2nySlr&3W%2pY&%(4WqP)-mLkbvO*jB+!huNTVi