diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 00000000..da09aade
Binary files /dev/null and b/.DS_Store differ
diff --git a/Gopkg.lock b/Gopkg.lock
index b80ad198..42bfd69e 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -64,6 +64,12 @@
   packages = ["."]
   revision = "553a641470496b2327abcac10b36396bd98e45c9"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/krallistic/kazoo-go"
+  packages = ["."]
+  revision = "a15279744f4e4a136cc4251ca8be36fc00798f2c"
+
 [[projects]]
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
@@ -124,6 +130,12 @@
   packages = ["."]
   revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/samuel/go-zookeeper"
+  packages = ["zk"]
+  revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
+
 [[projects]]
   name = "github.com/sirupsen/logrus"
   packages = ["."]
@@ -156,6 +168,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "24fc175e726a44a6f651daa65ac013bee7a1da9eb6daaab6c47410b0f19f0eac"
+  inputs-digest = "1e33ebc738ac41c7c7e4a4a6bd1d52c12483f2f7b1b3f408ec8b6524252797d3"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/kafka_exporter.go b/kafka_exporter.go
index 08ecaa8d..127e8902 100644
--- a/kafka_exporter.go
+++ b/kafka_exporter.go
@@ -12,8 +12,8 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-
 	"github.com/Shopify/sarama"
+	kazoo "github.com/krallistic/kazoo-go"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	plog "github.com/prometheus/common/log"
@@ -41,15 +41,18 @@ var (
 	consumergroupCurrentOffset    *prometheus.Desc
 	consumergroupCurrentOffsetSum *prometheus.Desc
 	consumergroupLag              *prometheus.Desc
 	consumergroupLagSum           *prometheus.Desc
+	consumergroupLagZookeeper     *prometheus.Desc
 )
 
 // Exporter collects Kafka stats from the given server and exports them using
 // the prometheus metrics package.
 type Exporter struct {
-	client      sarama.Client
-	topicFilter *regexp.Regexp
-	groupFilter *regexp.Regexp
-	mu          sync.Mutex
+	client          sarama.Client
+	topicFilter     *regexp.Regexp
+	groupFilter     *regexp.Regexp
+	mu              sync.Mutex
+	useZooKeeperLag bool
+	zookeeperClient *kazoo.Kazoo
 }
 
@@ -64,6 +67,8 @@ type kafkaOpts struct {
 	tlsKeyFile               string
 	tlsInsecureSkipTLSVerify bool
 	kafkaVersion             string
+	useZooKeeperLag          bool
+	uriZookeeper             []string
 	labels                   string
 }
@@ -103,6 +108,7 @@ func canReadFile(path string) bool {
 
 // NewExporter returns an initialized Exporter.
 func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Exporter, error) {
+	var zookeeperClient *kazoo.Kazoo
 	config := sarama.NewConfig()
 	config.ClientID = clientID
 	kafkaVersion, err := sarama.ParseKafkaVersion(opts.kafkaVersion)
@@ -154,6 +160,10 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor
 		}
 	}
 
+	if opts.useZooKeeperLag {
+		zookeeperClient, err = kazoo.NewKazoo(opts.uriZookeeper, nil)
+	}
+
 	client, err := sarama.NewClient(opts.uri, config)
 
 	if err != nil {
@@ -164,9 +174,11 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor
 	}
 
 	// Init our exporter.
 	return &Exporter{
-		client:      client,
-		topicFilter: regexp.MustCompile(topicFilter),
-		groupFilter: regexp.MustCompile(groupFilter),
+		client:          client,
+		topicFilter:     regexp.MustCompile(topicFilter),
+		groupFilter:     regexp.MustCompile(groupFilter),
+		useZooKeeperLag: opts.useZooKeeperLag,
+		zookeeperClient: zookeeperClient,
 	}, nil
 }
@@ -185,6 +197,7 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
 	ch <- consumergroupCurrentOffset
 	ch <- consumergroupCurrentOffsetSum
 	ch <- consumergroupLag
+	ch <- consumergroupLagZookeeper
 	ch <- consumergroupLagSum
 }
@@ -289,6 +302,25 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
 					topicUnderReplicatedPartition, prometheus.GaugeValue, float64(0), topic, strconv.FormatInt(int64(partition), 10),
 				)
 			}
+
+			if e.useZooKeeperLag {
+				ConsumerGroups, err := e.zookeeperClient.Consumergroups()
+
+				if err != nil {
+					plog.Errorf("Cannot get consumer group %v", err)
+				}
+
+				for _, group := range ConsumerGroups {
+					offset, _ := group.FetchOffset(topic, partition)
+					if offset > 0 {
+
+						consumerGroupLag := currentOffset - offset
+						ch <- prometheus.MustNewConstMetric(
+							consumergroupLagZookeeper, prometheus.GaugeValue, float64(consumerGroupLag), group.Name, topic, strconv.FormatInt(int64(partition), 10),
+						)
+					}
+				}
+			}
 		}
 	}
 }
@@ -427,6 +459,8 @@ func main() {
 	kingpin.Flag("tls.key-file", "The optional key file for client authentication.").Default("").StringVar(&opts.tlsKeyFile)
 	kingpin.Flag("tls.insecure-skip-tls-verify", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.").Default("false").BoolVar(&opts.tlsInsecureSkipTLSVerify)
 	kingpin.Flag("kafka.version", "Kafka broker version").Default(sarama.V1_0_0_0.String()).StringVar(&opts.kafkaVersion)
+	kingpin.Flag("use.consumelag.zookeeper", "if you need to use a group from zookeeper").Default("false").BoolVar(&opts.useZooKeeperLag)
+	kingpin.Flag("zookeeper.server", "Address (hosts) of zookeeper server.").Default("localhost:2181").StringsVar(&opts.uriZookeeper)
 	kingpin.Flag("kafka.labels", "Kafka cluster name").Default("").StringVar(&opts.labels)
 
 	plog.AddFlags(kingpin.CommandLine)
@@ -517,6 +551,12 @@ func main() {
 		"Current Approximate Lag of a ConsumerGroup at Topic/Partition",
 		[]string{"consumergroup", "topic", "partition"}, labels,
 	)
+
+	consumergroupLagZookeeper = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, "consumergroupzookeeper", "lag_zookeeper"),
+		"Current Approximate Lag(zookeeper) of a ConsumerGroup at Topic/Partition",
+		[]string{"consumergroup", "topic", "partition"}, nil,
+	)
 
 	consumergroupLagSum = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, "consumergroup", "lag_sum"),
diff --git a/original.log b/original.log
new file mode 100644
index 00000000..2bb5568a
--- /dev/null
+++ b/original.log
@@ -0,0 +1,744 @@
+[744 lines of git log output from the fork's commit history]
diff --git a/vendor/github.com/krallistic/kazoo-go/.gitignore b/vendor/github.com/krallistic/kazoo-go/.gitignore
new file mode 100644
index 00000000..cc8fabd6
--- /dev/null
+++ b/vendor/github.com/krallistic/kazoo-go/.gitignore
@@ -0,0 +1,2 @@
+kazoo.test
+confluent/
diff --git a/vendor/github.com/krallistic/kazoo-go/.travis.yml b/vendor/github.com/krallistic/kazoo-go/.travis.yml
new file mode 100644
index 00000000..ee5968b1
--- /dev/null
+++ b/vendor/github.com/krallistic/kazoo-go/.travis.yml
@@ -0,0 +1,31 @@
+language: go
+go:
+- "1.5"
+- "1.6"
+
+env:
+  global:
+  - ZOOKEEPER_PEERS=localhost:2181
+  - DEBUG=true
+
+install:
+- make dependencies
+
+before_script:
+- make confluent/kafka/start
+- make test/create_kafka_topics
+
+script:
+- make test
+- make vet
+- make errcheck
+- make fmt
+
+matrix:
+  include:
+  - go: tip
+  allow_failures:
+  - go: tip
+  fast_finish: true
+
+sudo: false
diff --git a/vendor/github.com/krallistic/kazoo-go/MIT-LICENSE b/vendor/github.com/krallistic/kazoo-go/MIT-LICENSE
new
file mode 100644 index 00000000..87d42803 --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/MIT-LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Willem van Bergen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/krallistic/kazoo-go/Makefile b/vendor/github.com/krallistic/kazoo-go/Makefile new file mode 100644 index 00000000..87ac0652 --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/Makefile @@ -0,0 +1,84 @@ +.PHONY: confluent/kafka/* confluent/zookeeper/* confluent/registry/* confluent/start confluent/stop fmt vet errcheck test test/create_kafka_topics dependencies dependencies/* + + +default: fmt vet errcheck test + + +# Confluent platform tasks + +confluent/start: confluent/rest/start + +confluent/stop: confluent/rest/stop confluent/registry/stop confluent/kafka/stop confluent/zookeeper/stop + +# Download & extract tasks + +confluent/confluent.tgz: + mkdir -p confluent && wget http://packages.confluent.io/archive/3.0/confluent-3.0.1-2.11.tar.gz -O confluent/confluent.tgz + +confluent/EXTRACTED: confluent/confluent.tgz + tar xzf confluent/confluent.tgz -C confluent --strip-components 1 && mkdir confluent/logs && touch confluent/EXTRACTED + echo "delete.topic.enable=true" >> confluent/etc/kafka/server.properties + +# Zookeeper tasks + +confluent/zookeeper/start: confluent/EXTRACTED + nohup confluent/bin/zookeeper-server-start confluent/etc/kafka/zookeeper.properties 2> confluent/logs/zookeeper.err > confluent/logs/zookeeper.out < /dev/null & + while ! nc localhost 2181 confluent/logs/kafka.err > confluent/logs/kafka.out < /dev/null & + while ! nc localhost 9092 confluent/logs/schema-registry.err > confluent/logs/schema-registry.out < /dev/null & + while ! nc localhost 8081 confluent/logs/kafka-rest.err > confluent/logs/kafka-rest.out < /dev/null & + while ! nc localhost 8082 0 { + return ErrRunningInstances + } + + return cg.kz.deleteRecursive(fmt.Sprintf("%s/consumers/%s", cg.kz.conf.Chroot, cg.Name)) +} + +// Instances returns a map of all running instances inside this consumergroup. 
+func (cg *Consumergroup) Instances() (ConsumergroupInstanceList, error) { + root := fmt.Sprintf("%s/consumers/%s/ids", cg.kz.conf.Chroot, cg.Name) + if exists, err := cg.kz.exists(root); err != nil { + return nil, err + } else if exists { + cgis, _, err := cg.kz.conn.Children(root) + if err != nil { + return nil, err + } + + result := make(ConsumergroupInstanceList, 0, len(cgis)) + for _, cgi := range cgis { + result = append(result, cg.Instance(cgi)) + } + return result, nil + } else { + result := make(ConsumergroupInstanceList, 0) + return result, nil + } +} + +// WatchInstances returns a ConsumergroupInstanceList, and a channel that will be closed +// as soon the instance list changes. +func (cg *Consumergroup) WatchInstances() (ConsumergroupInstanceList, <-chan zk.Event, error) { + node := fmt.Sprintf("%s/consumers/%s/ids", cg.kz.conf.Chroot, cg.Name) + if exists, err := cg.kz.exists(node); err != nil { + return nil, nil, err + } else if !exists { + if err := cg.kz.mkdirRecursive(node); err != nil { + return nil, nil, err + } + } + + cgis, _, c, err := cg.kz.conn.ChildrenW(node) + if err != nil { + return nil, nil, err + } + + result := make(ConsumergroupInstanceList, 0, len(cgis)) + for _, cgi := range cgis { + result = append(result, cg.Instance(cgi)) + } + + return result, c, nil +} + +// NewInstance instantiates a new ConsumergroupInstance inside this consumer group, +// using a newly generated ID. +func (cg *Consumergroup) NewInstance() *ConsumergroupInstance { + id, err := generateConsumerInstanceID() + if err != nil { + panic(err) + } + return cg.Instance(id) +} + +// Instance instantiates a new ConsumergroupInstance inside this consumer group, +// using an existing ID. +func (cg *Consumergroup) Instance(id string) *ConsumergroupInstance { + return &ConsumergroupInstance{cg: cg, ID: id} +} + +// PartitionOwner returns the ConsumergroupInstance that has claimed the given partition. +// This can be nil if nobody has claimed it yet. +func (cg *Consumergroup) PartitionOwner(topic string, partition int32) (*ConsumergroupInstance, error) { + node := fmt.Sprintf("%s/consumers/%s/owners/%s/%d", cg.kz.conf.Chroot, cg.Name, topic, partition) + val, _, err := cg.kz.conn.Get(node) + + // If the node does not exists, nobody has claimed it. + switch err { + case nil: + return &ConsumergroupInstance{cg: cg, ID: string(val)}, nil + case zk.ErrNoNode: + return nil, nil + default: + return nil, err + } +} + +// WatchPartitionOwner retrieves what instance is currently owning the partition, and sets a +// Zookeeper watch to be notified of changes. If the partition currently does not have an owner, +// the function returns nil for every return value. In this case is should be safe to claim +// the partition for an instance. +func (cg *Consumergroup) WatchPartitionOwner(topic string, partition int32) (*ConsumergroupInstance, <-chan zk.Event, error) { + node := fmt.Sprintf("%s/consumers/%s/owners/%s/%d", cg.kz.conf.Chroot, cg.Name, topic, partition) + instanceID, _, changed, err := cg.kz.conn.GetW(node) + + switch err { + case nil: + return &ConsumergroupInstance{cg: cg, ID: string(instanceID)}, changed, nil + + case zk.ErrNoNode: + return nil, nil, nil + + default: + return nil, nil, err + } +} + +// Registered checks whether the consumergroup instance is registered in Zookeeper. 
+func (cgi *ConsumergroupInstance) Registered() (bool, error) { + node := fmt.Sprintf("%s/consumers/%s/ids/%s", cgi.cg.kz.conf.Chroot, cgi.cg.Name, cgi.ID) + return cgi.cg.kz.exists(node) +} + +// Registered returns current registration of the consumer group instance. +func (cgi *ConsumergroupInstance) Registration() (*Registration, error) { + node := fmt.Sprintf("%s/consumers/%s/ids/%s", cgi.cg.kz.conf.Chroot, cgi.cg.Name, cgi.ID) + val, _, err := cgi.cg.kz.conn.Get(node) + if err != nil { + return nil, err + } + + reg := &Registration{} + if err := json.Unmarshal(val, reg); err != nil { + return nil, err + } + return reg, nil +} + +// RegisterSubscription registers the consumer instance in Zookeeper, with its subscription. +func (cgi *ConsumergroupInstance) RegisterWithSubscription(subscriptionJSON []byte) error { + if exists, err := cgi.Registered(); err != nil { + return err + } else if exists { + return ErrInstanceAlreadyRegistered + } + + // Create an ephemeral node for the the consumergroup instance. + node := fmt.Sprintf("%s/consumers/%s/ids/%s", cgi.cg.kz.conf.Chroot, cgi.cg.Name, cgi.ID) + return cgi.cg.kz.create(node, subscriptionJSON, true) +} + +// Register registers the consumergroup instance in Zookeeper. +func (cgi *ConsumergroupInstance) Register(topics []string) error { + subscription := make(map[string]int) + for _, topic := range topics { + subscription[topic] = 1 + } + + data, err := json.Marshal(&Registration{ + Pattern: RegPatternStatic, + Subscription: subscription, + Timestamp: time.Now().Unix(), + Version: RegDefaultVersion, + }) + if err != nil { + return err + } + + return cgi.RegisterWithSubscription(data) +} + +// Deregister removes the registration of the instance from zookeeper. +func (cgi *ConsumergroupInstance) Deregister() error { + node := fmt.Sprintf("%s/consumers/%s/ids/%s", cgi.cg.kz.conf.Chroot, cgi.cg.Name, cgi.ID) + exists, stat, err := cgi.cg.kz.conn.Exists(node) + if err != nil { + return err + } else if !exists { + return ErrInstanceNotRegistered + } + + return cgi.cg.kz.conn.Delete(node, stat.Version) +} + +// Claim claims a topic/partition ownership for a consumer ID within a group. If the +// partition is already claimed by another running instance, it will return ErrAlreadyClaimed. +func (cgi *ConsumergroupInstance) ClaimPartition(topic string, partition int32) error { + root := fmt.Sprintf("%s/consumers/%s/owners/%s", cgi.cg.kz.conf.Chroot, cgi.cg.Name, topic) + if err := cgi.cg.kz.mkdirRecursive(root); err != nil { + return err + } + + // Create an ephemeral node for the partition to claim the partition for this instance + node := fmt.Sprintf("%s/%d", root, partition) + err := cgi.cg.kz.create(node, []byte(cgi.ID), true) + switch err { + case zk.ErrNodeExists: + data, _, err := cgi.cg.kz.conn.Get(node) + if err != nil { + return err + } + if string(data) != cgi.ID { + // Return a separate error for this, to allow for implementing a retry mechanism. + return ErrPartitionClaimedByOther + } + return nil + default: + return err + } +} + +// ReleasePartition releases a claim to a partition. 
+func (cgi *ConsumergroupInstance) ReleasePartition(topic string, partition int32) error { + owner, err := cgi.cg.PartitionOwner(topic, partition) + if err != nil { + return err + } + if owner == nil || owner.ID != cgi.ID { + return ErrPartitionNotClaimed + } + + node := fmt.Sprintf("%s/consumers/%s/owners/%s/%d", cgi.cg.kz.conf.Chroot, cgi.cg.Name, topic, partition) + return cgi.cg.kz.conn.Delete(node, 0) +} + +// Topics retrieves the list of topics the consumergroup has claimed ownership of at some point. +func (cg *Consumergroup) Topics() (TopicList, error) { + root := fmt.Sprintf("%s/consumers/%s/owners", cg.kz.conf.Chroot, cg.Name) + children, _, err := cg.kz.conn.Children(root) + if err != nil { + return nil, err + } + + result := make(TopicList, 0, len(children)) + for _, name := range children { + result = append(result, cg.kz.Topic(name)) + } + return result, nil +} + +// CommitOffset commits an offset to a group/topic/partition +func (cg *Consumergroup) CommitOffset(topic string, partition int32, offset int64) error { + node := fmt.Sprintf("%s/consumers/%s/offsets/%s/%d", cg.kz.conf.Chroot, cg.Name, topic, partition) + data := []byte(fmt.Sprintf("%d", offset)) + + _, stat, err := cg.kz.conn.Get(node) + switch err { + case zk.ErrNoNode: // Create a new node + return cg.kz.create(node, data, false) + + case nil: // Update the existing node + _, err := cg.kz.conn.Set(node, data, stat.Version) + return err + + default: + return err + } +} + +// FetchOffset retrieves an offset to a group/topic/partition +func (cg *Consumergroup) FetchOffset(topic string, partition int32) (int64, error) { + node := fmt.Sprintf("%s/consumers/%s/offsets/%s/%d", cg.kz.conf.Chroot, cg.Name, topic, partition) + val, _, err := cg.kz.conn.Get(node) + if err == zk.ErrNoNode { + return -1, nil + } else if err != nil { + return -1, err + } + return strconv.ParseInt(string(val), 10, 64) +} + +// FetchOffset retrieves all the commmitted offsets for a group +func (cg *Consumergroup) FetchAllOffsets() (map[string]map[int32]int64, error) { + result := make(map[string]map[int32]int64) + + offsetsNode := fmt.Sprintf("%s/consumers/%s/offsets", cg.kz.conf.Chroot, cg.Name) + topics, _, err := cg.kz.conn.Children(offsetsNode) + if err == zk.ErrNoNode { + return result, nil + } else if err != nil { + return nil, err + } + + for _, topic := range topics { + result[topic] = make(map[int32]int64) + topicNode := fmt.Sprintf("%s/consumers/%s/offsets/%s", cg.kz.conf.Chroot, cg.Name, topic) + partitions, _, err := cg.kz.conn.Children(topicNode) + if err != nil { + return nil, err + } + + for _, partition := range partitions { + partitionNode := fmt.Sprintf("%s/consumers/%s/offsets/%s/%s", cg.kz.conf.Chroot, cg.Name, topic, partition) + val, _, err := cg.kz.conn.Get(partitionNode) + if err != nil { + return nil, err + } + + partition, err := strconv.ParseInt(partition, 10, 32) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(string(val), 10, 64) + if err != nil { + return nil, err + } + + result[topic][int32(partition)] = offset + } + } + + return result, nil +} + +func (cg *Consumergroup) ResetOffsets() error { + offsetsNode := fmt.Sprintf("%s/consumers/%s/offsets", cg.kz.conf.Chroot, cg.Name) + topics, _, err := cg.kz.conn.Children(offsetsNode) + if err == zk.ErrNoNode { + return nil + } else if err != nil { + return err + } + + for _, topic := range topics { + topicNode := fmt.Sprintf("%s/consumers/%s/offsets/%s", cg.kz.conf.Chroot, cg.Name, topic) + partitions, stat, err := 
cg.kz.conn.Children(topicNode) + if err != nil { + return err + } + + for _, partition := range partitions { + partitionNode := fmt.Sprintf("%s/consumers/%s/offsets/%s/%s", cg.kz.conf.Chroot, cg.Name, topic, partition) + exists, stat, err := cg.kz.conn.Exists(partitionNode) + if exists { + if err = cg.kz.conn.Delete(partitionNode, stat.Version); err != nil { + if err != zk.ErrNoNode { + return err + } + } + } + } + + if err := cg.kz.conn.Delete(topicNode, stat.Version); err != nil { + if err != zk.ErrNoNode { + return err + } + } + } + + return nil +} + +// generateUUID Generates a UUIDv4. +func generateUUID() (string, error) { + uuid := make([]byte, 16) + n, err := io.ReadFull(rand.Reader, uuid) + if n != len(uuid) || err != nil { + return "", err + } + // variant bits; see section 4.1.1 + uuid[8] = uuid[8]&^0xc0 | 0x80 + // version 4 (pseudo-random); see section 4.1.3 + uuid[6] = uuid[6]&^0xf0 | 0x40 + return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil +} + +// generateConsumerInstanceID generates a consumergroup Instance ID +// that is almost certain to be unique. +func generateConsumerInstanceID() (string, error) { + uuid, err := generateUUID() + if err != nil { + return "", err + } + + hostname, err := os.Hostname() + if err != nil { + return "", err + } + + return fmt.Sprintf("%s:%s", hostname, uuid), nil +} + +// Find returns the consumergroup with the given name if it exists in the list. +// Otherwise it will return `nil`. +func (cgl ConsumergroupList) Find(name string) *Consumergroup { + for _, cg := range cgl { + if cg.Name == name { + return cg + } + } + return nil +} + +func (cgl ConsumergroupList) Len() int { + return len(cgl) +} + +func (cgl ConsumergroupList) Less(i, j int) bool { + return cgl[i].Name < cgl[j].Name +} + +func (cgl ConsumergroupList) Swap(i, j int) { + cgl[i], cgl[j] = cgl[j], cgl[i] +} + +// Find returns the consumergroup instance with the given ID if it exists in the list. +// Otherwise it will return `nil`. 
+func (cgil ConsumergroupInstanceList) Find(id string) *ConsumergroupInstance { + for _, cgi := range cgil { + if cgi.ID == id { + return cgi + } + } + return nil +} + +func (cgil ConsumergroupInstanceList) Len() int { + return len(cgil) +} + +func (cgil ConsumergroupInstanceList) Less(i, j int) bool { + return cgil[i].ID < cgil[j].ID +} + +func (cgil ConsumergroupInstanceList) Swap(i, j int) { + cgil[i], cgil[j] = cgil[j], cgil[i] +} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go b/vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go new file mode 100644 index 00000000..e679254e --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/functional_cluster_test.go @@ -0,0 +1,98 @@ +package kazoo + +import ( + "fmt" + "net" + "os" + "strings" + "testing" + "time" +) + +var ( + // By default, assume we're using Sarama's vagrant cluster when running tests + zookeeperPeers []string = []string{"192.168.100.67:2181", "192.168.100.67:2182", "192.168.100.67:2183", "192.168.100.67:2184", "192.168.100.67:2185"} +) + +func init() { + if zookeeperPeersEnv := os.Getenv("ZOOKEEPER_PEERS"); zookeeperPeersEnv != "" { + zookeeperPeers = strings.Split(zookeeperPeersEnv, ",") + } + + fmt.Printf("Using Zookeeper cluster at %v\n", zookeeperPeers) +} + +func TestBrokers(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + brokers, err := kz.Brokers() + if err != nil { + t.Fatal(err) + } + + if len(brokers) == 0 { + t.Error("Expected at least one broker") + } + + for id, addr := range brokers { + if conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond); err != nil { + t.Errorf("Failed to connect to Kafka broker %d at %s", id, addr) + } else { + _ = conn.Close() + } + } + + assertSuccessfulClose(t, kz) +} + +func TestBrokerList(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + brokers, err := kz.BrokerList() + if err != nil { + t.Fatal(err) + } + + if len(brokers) == 0 { + t.Error("Expected at least one broker") + } + + for _, addr := range brokers { + if conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond); err != nil { + t.Errorf("Failed to connect to Kafka broker at %s", addr) + } else { + _ = conn.Close() + } + } + + assertSuccessfulClose(t, kz) +} + +func TestController(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + brokers, err := kz.Brokers() + if err != nil { + t.Fatal(err) + } + + controller, err := kz.Controller() + if err != nil { + t.Fatal(err) + } + + if _, ok := brokers[controller]; !ok { + t.Error("Expected the controller's BrokerID to be an existing one") + } + + assertSuccessfulClose(t, kz) +} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go b/vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go new file mode 100644 index 00000000..49aa7156 --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/functional_consumergroup_test.go @@ -0,0 +1,658 @@ +package kazoo + +import ( + "reflect" + "sync" + "testing" + "time" + + "github.com/samuel/go-zookeeper/zk" +) + +func TestConsumergroups(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroups") + + cgs, err := kz.Consumergroups() + if err != nil { + t.Error(err) + } + originalCount := len(cgs) + + if cg := cgs.Find(cg.Name); cg != nil { + 
t.Error("Consumergoup `test.kazoo.TestConsumergroups` should not be found") + } + + if exists, _ := cg.Exists(); exists { + t.Error("Consumergoup `test.kazoo.TestConsumergroups` should not be registered yet") + } + + if err := cg.Create(); err != nil { + t.Error(err) + } + + if exists, _ := cg.Exists(); !exists { + t.Error("Consumergoup `test.kazoo.TestConsumergroups` should be registered now") + } + + cgs, err = kz.Consumergroups() + if err != nil { + t.Error(err) + } + + if len(cgs) != originalCount+1 { + t.Error("Should have one more consumergroup than at the start") + } + + if err := cg.Delete(); err != nil { + t.Error(err) + } + + if exists, _ := cg.Exists(); exists { + t.Error("Consumergoup `test.kazoo.TestConsumergroups` should not be registered anymore") + } + + cgs, err = kz.Consumergroups() + if err != nil { + t.Error(err) + } + + if len(cgs) != originalCount { + t.Error("Should have the original number of consumergroups again") + } +} + +func TestConsumergroupInstances(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstances") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + if instances, err := cg.Instances(); err != nil { + t.Error(err) + } else if len(instances) != 0 { + t.Fatal("Expected no active consumergroup instances") + } + + instance1 := cg.NewInstance() + // Make sure that the instance is unregistered. + if reg, err := instance1.Registration(); err != zk.ErrNoNode || reg != nil { + t.Errorf("Expected no registration: reg=%v, err=(%v)", reg, err) + } + + // Register a new instance + if instance1.ID == "" { + t.Error("It should generate a valid instance ID") + } + if err := instance1.Register([]string{"topic"}); err != nil { + t.Error(err) + } + + // Verify registration + reg, err := instance1.Registration() + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(reg.Subscription, map[string]int{"topic": 1}) { + t.Errorf("Unexpected registration: %v", reg) + } + + // Try to register an instance with the same ID. 
+ if err := cg.Instance(instance1.ID).Register([]string{"topic"}); err != ErrInstanceAlreadyRegistered { + t.Error("The instance should already be registered") + } + + instance2 := cg.Instance("test") + if err := instance2.Register([]string{"topic"}); err != nil { + t.Error(err) + } + + time.Sleep(50 * time.Millisecond) + + if instances, err := cg.Instances(); err != nil { + t.Error(err) + } else { + if len(instances) != 2 { + t.Error("Expected 2 active consumergroup instances") + } + if i := instances.Find(instance1.ID); i == nil { + t.Error("Expected instance1 to be registered.") + } + if i := instances.Find(instance2.ID); i == nil { + t.Error("Expected instance2 to be registered.") + } + } + + // Deregister the two running instances + if err := instance1.Deregister(); err != nil { + t.Error(err) + } + if err := instance2.Deregister(); err != nil { + t.Error(err) + } + + // Try to deregister an instance that was not register + instance3 := cg.NewInstance() + if err := instance3.Deregister(); err != ErrInstanceNotRegistered { + t.Error("Expected new instance to not be registered") + } + + if instances, err := cg.Instances(); err != nil { + t.Error(err) + } else if len(instances) != 0 { + t.Error("Expected no active consumergroup instances") + } +} + +func TestConsumergroupInstanceCrash(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstancesEphemeral") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + // Create a kazoo instance on which we will simulate a crash. + config := NewConfig() + config.Timeout = 50 * time.Millisecond + crashingKazoo, err := NewKazoo(zookeeperPeers, config) + if err != nil { + t.Fatal(err) + } + crashingCG := crashingKazoo.Consumergroup(cg.Name) + + // Instantiate and register the instance. 
+ instance := crashingCG.NewInstance() + if err := instance.Register([]string{"test.1"}); err != nil { + t.Error(err) + } + + time.Sleep(50 * time.Millisecond) + if instances, err := cg.Instances(); err != nil { + t.Error(err) + } else if len(instances) != 1 { + t.Error("Should have 1 running instance, found", len(instances)) + } + + // Simulate a crash, and wait for Zookeeper to pick it up + _ = crashingKazoo.Close() + time.Sleep(200 * time.Millisecond) + + if instances, err := cg.Instances(); err != nil { + t.Error(err) + } else if len(instances) != 0 { + t.Error("Should have 0 running instances") + } +} + +func TestConsumergroupWatchInstances(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupWatchInstances") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + instances, c, err := cg.WatchInstances() + if err != nil { + t.Fatal(err) + } + + if len(instances) != 0 { + t.Error("Expected 0 running instances") + } + + instance := cg.NewInstance() + if err := instance.Register([]string{"topic"}); err != nil { + t.Fatal(err) + } + + // The instance watch should have been triggered + <-c + + instances, c, err = cg.WatchInstances() + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error("Expected 1 running instance") + } + + if err := instance.Deregister(); err != nil { + t.Fatal(err) + } + + // The instance watch should have been triggered again + <-c + + instances, err = cg.Instances() + if err != nil { + t.Fatal(err) + } + + if len(instances) != 0 { + t.Error("Expected 0 running instances") + } +} + +func TestConsumergroupInstanceClaimPartition(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstanceClaimPartition") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + // Create two instances for this consumergroup + + i1 := cg.NewInstance() + if err := i1.Register([]string{"test.4"}); err != nil { + t.Fatal(err) + } + i2 := cg.NewInstance() + if err := i2.Register([]string{"test.4"}); err != nil { + t.Fatal(err) + } + + // Claim all partitions divided by instance 1 and 2 + + if err := i1.ClaimPartition("test.4", 0); err != nil { + t.Error(err) + } + if err := i1.ClaimPartition("test.4", 1); err != nil { + t.Error(err) + } + if err := i2.ClaimPartition("test.4", 2); err != nil { + t.Error(err) + } + if err := i2.ClaimPartition("test.4", 3); err != nil { + t.Error(err) + } + + // Try to claim more partitions + if err := i1.ClaimPartition("test.4", 3); err != ErrPartitionClaimedByOther { + t.Error("Expected ErrPartitionClaimedByOther to be returned, found", err) + } + + if err := i2.ClaimPartition("test.4", 0); err != ErrPartitionClaimedByOther { + t.Error("Expected ErrPartitionClaimedByOther to be returned, found", err) + } + + // Instance 1: release some partitions + + if err := i1.ReleasePartition("test.4", 0); err != nil { + t.Error(err) + } + if err := i1.ReleasePartition("test.4", 1); err != nil { + t.Error(err) + } + + // Instance 2: claim the released partitions + + if err := i2.ClaimPartition("test.4", 0); err != nil { + t.Error(err) + } + if err := i2.ClaimPartition("test.4", 1); err != nil { + t.Error(err) + } + + // 
Instance 2: release all partitions + + if err := i2.ReleasePartition("test.4", 0); err != nil { + t.Error(err) + } + if err := i2.ReleasePartition("test.4", 1); err != nil { + t.Error(err) + } + if err := i2.ReleasePartition("test.4", 2); err != nil { + t.Error(err) + } + if err := i2.ReleasePartition("test.4", 3); err != nil { + t.Error(err) + } + + if err := i1.Deregister(); err != nil { + t.Error(err) + } + if err := i2.Deregister(); err != nil { + t.Error(err) + } +} + +func TestConsumergroupInstanceClaimPartitionSame(t *testing.T) { + // Given + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstanceClaimPartition2") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + instance := cg.NewInstance() + if err := instance.Register([]string{"test.4"}); err != nil { + t.Fatal(err) + } + + if err := instance.ClaimPartition("test.4", 0); err != nil { + t.Error(err) + } + + // When: claim the same partition again + err = instance.ClaimPartition("test.4", 0) + + // Then + if err != nil { + t.Error(err) + } + + // Cleanup + if err := instance.ReleasePartition("test.4", 0); err != nil { + t.Error(err) + } + if err := instance.Deregister(); err != nil { + t.Error(err) + } +} + +func TestConsumergroupInstanceWatchPartitionClaim(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupInstanceWatchPartitionClaim") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + instance1 := cg.NewInstance() + if err := instance1.Register([]string{"test.4"}); err != nil { + t.Fatal(err) + } + + // Assert the partition isn't claimed + instance, change, err := cg.WatchPartitionOwner("test.4", 0) + if err != nil { + t.Fatal(err) + } + if instance != nil { + t.Fatal("An unclaimed partition should not return an instance") + } + if change != nil { + t.Fatal("An unclaimed partition should not return a watch") + } + + // Now claim the partition + if err := instance1.ClaimPartition("test.4", 0); err != nil { + t.Fatal(err) + } + + // This time, we should get an insance back + instance, change, err = cg.WatchPartitionOwner("test.4", 0) + if err != nil { + t.Fatal(err) + } + + if instance.ID != instance1.ID { + t.Error("Our instance should have claimed the partition") + } + + go func() { + time.Sleep(100 * time.Millisecond) + if err := instance1.ReleasePartition("test.4", 0); err != nil { + t.Fatal(err) + } + }() + + // Wait for the zookeeper watch to trigger + <-change + + // Ensure the partition is no longer claimed + instance, err = cg.PartitionOwner("test.4", 0) + if err != nil { + t.Fatal(err) + } + if instance != nil { + t.Error("The partition should have been release by now") + } + + // Cleanup + if err := instance1.Deregister(); err != nil { + t.Error(err) + } +} + +func TestConsumergroupOffsets(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupOffsets") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + offset, err := cg.FetchOffset("test", 0) + if err != nil { + 
t.Error(err) + } + + if offset >= 0 { + t.Error("Expected to get a negative offset for a partition that hasn't seen an offset commit yet") + } + + if err := cg.CommitOffset("test", 0, 1234); err != nil { + t.Error(err) + } + + offset, err = cg.FetchOffset("test", 0) + if err != nil { + t.Error(err) + } + if offset != 1234 { + t.Error("Expected to get the offset that was committed.") + } +} + +func TestConsumergroupResetOffsetsRace(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupResetOffsetsRace") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + offsets, err := cg.FetchAllOffsets() + if err != nil { + t.Error(err) + } + + if len(offsets) > 0 { + t.Errorf("A new consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) + } + + if err := cg.CommitOffset("test", 0, 1234); err != nil { + t.Error(err) + } + + if err := cg.CommitOffset("test", 1, 2345); err != nil { + t.Error(err) + } + + offsets, err = cg.FetchAllOffsets() + if err != nil { + t.Error(err) + } + + if offsets["test"][0] == 1234 && offsets["test"][1] == 2345 { + t.Log("All offsets present in offset map") + } else { + t.Logf("Offset map not as expected: %v", offsets) + } + + cg2 := kz.Consumergroup("test.kazoo.TestConsumergroupResetOffsetsRace") + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + if err := cg2.ResetOffsets(); err != nil { + t.Fatal(err) + } + }() + go func() { + defer wg.Done() + if err := cg.ResetOffsets(); err != nil { + t.Fatal(err) + } + }() + + wg.Wait() + + offsets, err = cg.FetchAllOffsets() + if err != nil { + t.Error(err) + } + + if len(offsets) > 0 { + t.Errorf("After a reset, consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) + } +} + +func TestConsumergroupResetOffsets(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + defer assertSuccessfulClose(t, kz) + + cg := kz.Consumergroup("test.kazoo.TestConsumergroupResetOffsets") + if err := cg.Create(); err != nil { + t.Fatal(err) + } + defer func() { + if err := cg.Delete(); err != nil { + t.Error(err) + } + }() + + offsets, err := cg.FetchAllOffsets() + if err != nil { + t.Error(err) + } + + if len(offsets) > 0 { + t.Errorf("A new consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) + } + + if err := cg.CommitOffset("test1", 0, 1234); err != nil { + t.Error(err) + } + + if err := cg.CommitOffset("test1", 1, 2345); err != nil { + t.Error(err) + } + + if err := cg.CommitOffset("test2", 0, 3456); err != nil { + t.Error(err) + } + + offsets, err = cg.FetchAllOffsets() + if err != nil { + t.Error(err) + } + + if offsets["test1"][0] == 1234 && offsets["test1"][1] == 2345 && offsets["test2"][0] == 3456 { + t.Log("All offsets present in offset map") + } else { + t.Logf("Offset map not as expected: %v", offsets) + } + + if err := cg.ResetOffsets(); err != nil { + t.Fatal(err) + } + + offsets, err = cg.FetchAllOffsets() + if err != nil { + t.Error(err) + } + + if len(offsets) > 0 { + t.Errorf("After a reset, consumergroup shouldn't have any offsets set, but found offsets for %d topics", len(offsets)) + } +} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go 
b/vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go new file mode 100644 index 00000000..9313781f --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/functional_topic_admin_test.go @@ -0,0 +1,123 @@ +package kazoo + +import ( + "reflect" + "testing" + "time" +) + +func TestCreateDeleteTopic(t *testing.T) { + tests := []struct { + name string + partitionCount int + config map[string]string + err error + }{ + {"test.admin.1", 1, nil, nil}, + {"test.admin.1", 1, nil, ErrTopicExists}, + {"test.admin.2", 1, map[string]string{}, nil}, + {"test.admin.3", 4, map[string]string{"retention.ms": "604800000"}, nil}, + {"test.admin.3", 3, nil, ErrTopicExists}, + {"test.admin.4", 12, map[string]string{"retention.bytes": "1000000000", "retention.ms": "9999999"}, nil}, + } + + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + for testIdx, test := range tests { + err = kz.CreateTopic(test.name, test.partitionCount, 1, test.config) + if err != test.err { + t.Errorf("Unexpected error (%v) creating %s for test %d", err, test.name, testIdx) + continue + } + if err == nil { + topic := kz.Topic(test.name) + conf, err := topic.Config() + if err != nil { + t.Errorf("Unable to get topic config (%v) for %s for test %d", err, test.name, testIdx) + } + // allow for nil == empty map + if !reflect.DeepEqual(conf, test.config) && !(test.config == nil && len(conf) == 0) { + t.Errorf("Invalid config for %s in test %d. Expected (%v) got (%v)", test.name, testIdx, conf, test.config) + } + } + + } + + // delete all test topics + topicMap := make(map[string]bool) + for _, test := range tests { + // delete if we haven't seen the topic before + if _, ok := topicMap[test.name]; !ok { + err := kz.DeleteTopic(test.name) + if err != nil { + t.Errorf("Unable to delete topic %s (%v)", test.name, err) + } + } + topicMap[test.name] = true + } + + totalToDelete := len(topicMap) + + // wait for deletion (up to 60s) + for i := 0; i < 15; i++ { + for name := range topicMap { + topic := &Topic{kz: kz, Name: name} + if exists, _ := topic.Exists(); !exists { + delete(topicMap, name) + } + } + // all topics deleted + if len(topicMap) == 0 { + break + } + time.Sleep(1 * time.Second) + } + + if len(topicMap) != 0 { + t.Errorf("Unable to delete all topics %d out of %d remaining after 15 seconds", len(topicMap), totalToDelete) + } +} + +func TestDeleteTopicSync(t *testing.T) { + + kz, err := NewKazoo(zookeeperPeers, nil) + + topicName := "test.admin.1" + + if err != nil { + t.Fatal(err) + } + + err = kz.CreateTopic(topicName, 1, 1, nil) + + if err != nil { + t.Errorf("Unexpected error (%v) creating topic %s", err, topicName) + } + + topic := kz.Topic("test.admin.1") + _, err = topic.Config() + + if err != nil { + t.Errorf("Unable to get topic config (%v) for %s", err, topicName) + } + + // delete the topic synchronously + err = kz.DeleteTopicSync(topicName, 0) + + if err != nil { + t.Errorf("Unexpected error (%v) while deleting topic synchronously", err) + } + + exists, err := topic.Exists() + + if err != nil { + t.Errorf("Unexpected error (%v) while checking if topic exists", err) + } + + if exists { + t.Error("Deleted topic still exists.") + } +} diff --git a/vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go b/vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go new file mode 100644 index 00000000..34c8f42b --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/functional_topic_metadata_test.go @@ -0,0 +1,111 @@ +package kazoo + +import ( + 
"testing" +) + +func TestTopics(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + topics, err := kz.Topics() + if err != nil { + t.Error(err) + } + + existingTopic := topics.Find("test.4") + if existingTopic == nil { + t.Error("Expected topic test.4 to be returned") + } else if existingTopic.Name != "test.4" { + t.Error("Expected topic test.4 to have its name set") + } + + nonexistingTopic := topics.Find("__nonexistent__") + if nonexistingTopic != nil { + t.Error("Expected __nonexistent__ topic to not be defined") + } + + assertSuccessfulClose(t, kz) +} + +func TestTopicPartitions(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + partitions, err := kz.Topic("test.4").Partitions() + if err != nil { + t.Fatal(err) + } + + if len(partitions) != 4 { + t.Errorf("Expected test.4 to have 4 partitions") + } + + brokers, err := kz.Brokers() + if err != nil { + t.Fatal(err) + } + + for index, partition := range partitions { + if partition.ID != int32(index) { + t.Error("partition.ID is not set properly") + } + + leader, err := partition.Leader() + if err != nil { + t.Fatal(err) + } + + if _, ok := brokers[leader]; !ok { + t.Errorf("Expected the leader of test.4/%d to be an existing broker.", partition.ID) + } + + isr, err := partition.ISR() + if err != nil { + t.Fatal(err) + } + + for _, brokerID := range isr { + if _, ok := brokers[brokerID]; !ok { + t.Errorf("Expected all ISRs of test.4/%d to be existing brokers.", partition.ID) + } + } + } + + assertSuccessfulClose(t, kz) +} + +func TestTopicConfig(t *testing.T) { + kz, err := NewKazoo(zookeeperPeers, nil) + if err != nil { + t.Fatal(err) + } + + topicConfig, err := kz.Topic("test.4").Config() + if err != nil { + t.Error(err) + } + if topicConfig["retention.ms"] != "604800000" { + t.Error("Expected retention.ms config for test.4 to be set to 604800000") + } + + topicConfig, err = kz.Topic("test.1").Config() + if err != nil { + t.Error(err) + } + if len(topicConfig) > 0 { + t.Error("Expected no topic level configuration to be set for test.1") + } + + assertSuccessfulClose(t, kz) +} + +func assertSuccessfulClose(t *testing.T, kz *Kazoo) { + if err := kz.Close(); err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/krallistic/kazoo-go/kazoo.go b/vendor/github.com/krallistic/kazoo-go/kazoo.go new file mode 100644 index 00000000..138b6200 --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/kazoo.go @@ -0,0 +1,262 @@ +package kazoo + +import ( + "encoding/json" + "errors" + "fmt" + "path" + "sort" + "strconv" + "strings" + "time" + + "github.com/samuel/go-zookeeper/zk" +) + +var ( + FailedToClaimPartition = errors.New("Failed to claim partition for this consumer instance. Do you have a rogue consumer running?") +) + +// ParseConnectionString parses a zookeeper connection string in the form of +// host1:2181,host2:2181/chroot and returns the list of servers, and the chroot. +func ParseConnectionString(zookeeper string) (nodes []string, chroot string) { + nodesAndChroot := strings.SplitN(zookeeper, "/", 2) + if len(nodesAndChroot) == 2 { + chroot = fmt.Sprintf("/%s", nodesAndChroot[1]) + } + nodes = strings.Split(nodesAndChroot[0], ",") + return +} + +// BuildConnectionString builds a Zookeeper connection string for a list of nodes. 
+// Returns a string like "zk1:2181,zk2:2181,zk3:2181" +func BuildConnectionString(nodes []string) string { + return strings.Join(nodes, ",") +} + +// BuildConnectionStringWithChroot builds a Zookeeper connection string for a list +// of nodes and a chroot. The chroot should start with "/". +// Returns a string like "zk1:2181,zk2:2181,zk3:2181/chroot" +func BuildConnectionStringWithChroot(nodes []string, chroot string) string { + return fmt.Sprintf("%s%s", strings.Join(nodes, ","), chroot) +} + +// Kazoo interacts with the Kafka metadata in Zookeeper +type Kazoo struct { + conn *zk.Conn + conf *Config +} + +// Config holds configuration values for a Kazoo instance. +type Config struct { + // The chroot the Kafka installation is registered under. Defaults to "". + Chroot string + + // The amount of time the Zookeeper client can be disconnected from the Zookeeper cluster + // before the cluster will get rid of watches and ephemeral nodes. Defaults to 1 second. + Timeout time.Duration +} + +// NewConfig instantiates a new Config struct with sane defaults. +func NewConfig() *Config { + return &Config{Timeout: 1 * time.Second} +} + +// NewKazoo creates a new connection instance +func NewKazoo(servers []string, conf *Config) (*Kazoo, error) { + if conf == nil { + conf = NewConfig() + } + + conn, _, err := zk.Connect(servers, conf.Timeout) + if err != nil { + return nil, err + } + return &Kazoo{conn, conf}, nil +} + +// NewKazooFromConnectionString creates a new connection instance +// based on a zookeeper connection string that can include a chroot. +func NewKazooFromConnectionString(connectionString string, conf *Config) (*Kazoo, error) { + if conf == nil { + conf = NewConfig() + } + + nodes, chroot := ParseConnectionString(connectionString) + conf.Chroot = chroot + return NewKazoo(nodes, conf) +} + +// Brokers returns a map of all the brokers that are part of the +// Kafka cluster that is registered in Zookeeper. +func (kz *Kazoo) Brokers() (map[int32]string, error) { + root := fmt.Sprintf("%s/brokers/ids", kz.conf.Chroot) + children, _, err := kz.conn.Children(root) + if err != nil { + return nil, err + } + + type brokerEntry struct { + Host string `json:"host"` + Port int `json:"port"` + } + + result := make(map[int32]string) + for _, child := range children { + brokerID, err := strconv.ParseInt(child, 10, 32) + if err != nil { + return nil, err + } + + value, _, err := kz.conn.Get(path.Join(root, child)) + if err != nil { + return nil, err + } + + var brokerNode brokerEntry + if err := json.Unmarshal(value, &brokerNode); err != nil { + return nil, err + } + + result[int32(brokerID)] = fmt.Sprintf("%s:%d", brokerNode.Host, brokerNode.Port) + } + + return result, nil +} + +// BrokerList returns a slice of broker addresses that can be used to connect to +// the Kafka cluster, e.g. using `sarama.NewAsyncProducer()`. +func (kz *Kazoo) BrokerList() ([]string, error) { + brokers, err := kz.Brokers() + if err != nil { + return nil, err + } + + result := make([]string, 0, len(brokers)) + for _, broker := range brokers { + result = append(result, broker) + } + + return result, nil +} + +// brokerIDList returns a sorted slice of broker ids that can be used for manipulating topics and partitions.
+func (kz *Kazoo) brokerIDList() ([]int32, error) { + brokers, err := kz.Brokers() + if err != nil { + return nil, err + } + + result := make([]int32, 0, len(brokers)) + for id := range brokers { + result = append(result, id) + } + + // return sorted list to match the official kafka sdks + sort.Sort(int32Slice(result)) + + return result, nil +} + +// Controller returns what broker is currently acting as controller of the Kafka cluster +func (kz *Kazoo) Controller() (int32, error) { + type controllerEntry struct { + BrokerID int32 `json:"brokerid"` + } + + node := fmt.Sprintf("%s/controller", kz.conf.Chroot) + data, _, err := kz.conn.Get(node) + if err != nil { + return -1, err + } + + var controllerNode controllerEntry + if err := json.Unmarshal(data, &controllerNode); err != nil { + return -1, err + } + + return controllerNode.BrokerID, nil +} + +// Close closes the connection with the Zookeeper cluster +func (kz *Kazoo) Close() error { + kz.conn.Close() + return nil +} + +//////////////////////////////////////////////////////////////////////// +// Util methods +//////////////////////////////////////////////////////////////////////// + +// exists checks the existence of a node +func (kz *Kazoo) exists(node string) (ok bool, err error) { + ok, _, err = kz.conn.Exists(node) + return +} + +// deleteRecursive deletes a node and all of its children recursively +func (kz *Kazoo) deleteRecursive(node string) (err error) { + children, stat, err := kz.conn.Children(node) + if err == zk.ErrNoNode { + return nil + } else if err != nil { + return + } + + for _, child := range children { + if err = kz.deleteRecursive(path.Join(node, child)); err != nil { + return + } + } + + return kz.conn.Delete(node, stat.Version) +} + +// mkdirRecursive creates a node, including any missing parents, recursively +func (kz *Kazoo) mkdirRecursive(node string) (err error) { + parent := path.Dir(node) + if parent != "/" { + if err = kz.mkdirRecursive(parent); err != nil { + return + } + } + + _, err = kz.conn.Create(node, nil, 0, zk.WorldACL(zk.PermAll)) + if err == zk.ErrNodeExists { + err = nil + } + return +} + +// create stores a new value at node. Fails if already set. +func (kz *Kazoo) create(node string, value []byte, ephemeral bool) (err error) { + if err = kz.mkdirRecursive(path.Dir(node)); err != nil { + return + } + + flags := int32(0) + if ephemeral { + flags = zk.FlagEphemeral + } + _, err = kz.conn.Create(node, value, flags, zk.WorldACL(zk.PermAll)) + return +} + +// createOrUpdate first attempts to update a node. If the node does not exist it will create it.
+func (kz *Kazoo) createOrUpdate(node string, value []byte, ephemeral bool) (err error) { + if _, err = kz.conn.Set(node, value, -1); err == nil { + return + } + + if err == zk.ErrNoNode { + err = kz.create(node, value, ephemeral) + } + return +} + +// sort interface for int32 slice +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vendor/github.com/krallistic/kazoo-go/kazoo_test.go b/vendor/github.com/krallistic/kazoo-go/kazoo_test.go new file mode 100644 index 00000000..c32ba0ea --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/kazoo_test.go @@ -0,0 +1,56 @@ +package kazoo + +import ( + "testing" +) + +func TestBuildConnectionString(t *testing.T) { + nodes := []string{"zk1:2181", "zk2:2181", "zk3:2181"} + + if str := BuildConnectionString(nodes); str != "zk1:2181,zk2:2181,zk3:2181" { + t.Errorf("The connection string was not built correctly: %s", str) + } + + if str := BuildConnectionStringWithChroot(nodes, "/chroot"); str != "zk1:2181,zk2:2181,zk3:2181/chroot" { + t.Errorf("The connection string was not built correctly: %s", str) + } +} + +func TestParseConnectionString(t *testing.T) { + var ( + nodes []string + chroot string + ) + + nodes, chroot = ParseConnectionString("zookeeper/chroot") + if len(nodes) != 1 || nodes[0] != "zookeeper" { + t.Error("Parsed nodes incorrectly:", nodes) + } + if chroot != "/chroot" { + t.Error("Parsed chroot incorrectly:", chroot) + } + + nodes, chroot = ParseConnectionString("zk1:2181,zk2:2181,zk3:2181") + if len(nodes) != 3 || nodes[0] != "zk1:2181" || nodes[1] != "zk2:2181" || nodes[2] != "zk3:2181" { + t.Error("Parsed nodes incorrectly:", nodes) + } + if chroot != "" { + t.Error("Parsed chroot incorrectly:", chroot) + } + + nodes, chroot = ParseConnectionString("zk1:2181,zk2/nested/chroot") + if len(nodes) != 2 || nodes[0] != "zk1:2181" || nodes[1] != "zk2" { + t.Error("Parsed nodes incorrectly:", nodes) + } + if chroot != "/nested/chroot" { + t.Error("Parsed chroot incorrectly:", chroot) + } + + nodes, chroot = ParseConnectionString("") + if len(nodes) != 1 || nodes[0] != "" { + t.Error("Parsed nodes incorrectly:", nodes) + } + if chroot != "" { + t.Error("Parsed chroot incorrectly:", chroot) + } +} diff --git a/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore b/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore new file mode 100644 index 00000000..c93d3c5b --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/.gitignore @@ -0,0 +1,2 @@ +kafka-topics +kafka-topics.test diff --git a/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go b/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go new file mode 100644 index 00000000..2262f8de --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/tools/kafka-topics/kafka-topics.go @@ -0,0 +1,93 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "os" + "sort" + "sync" + "time" + + "github.com/wvanbergen/kazoo-go" +) + +var ( + zookeeper = flag.String("zookeeper", os.Getenv("ZOOKEEPER_PEERS"), "Zookeeper connection string. 
It can include a chroot.") + zookeeperTimeout = flag.Int("zookeeper-timeout", 1000, "Zookeeper timeout in milliseconds.") +) + +func main() { + flag.Parse() + + if *zookeeper == "" { + printUsageErrorAndExit("You have to provide a zookeeper connection string using -zookeeper, or the ZOOKEEPER_PEERS environment variable") + } + + conf := kazoo.NewConfig() + conf.Timeout = time.Duration(*zookeeperTimeout) * time.Millisecond + + kz, err := kazoo.NewKazooFromConnectionString(*zookeeper, conf) + if err != nil { + printErrorAndExit(69, "Failed to connect to Zookeeper: %v", err) + } + defer func() { _ = kz.Close() }() + + topics, err := kz.Topics() + if err != nil { + printErrorAndExit(69, "Failed to get Kafka topics from Zookeeper: %v", err) + } + sort.Sort(topics) + + var ( + wg sync.WaitGroup + l sync.Mutex + stdout = make([]string, len(topics)) + ) + + for i, topic := range topics { + wg.Add(1) + go func(i int, topic *kazoo.Topic) { + defer wg.Done() + + buffer := bytes.NewBuffer(make([]byte, 0)) + + partitions, err := topic.Partitions() + if err != nil { + printErrorAndExit(69, "Failed to get Kafka topic partitions from Zookeeper: %v", err) + } + + fmt.Fprintf(buffer, "Topic: %s\tPartitions: %d\n", topic.Name, len(partitions)) + + for _, partition := range partitions { + leader, _ := partition.Leader() + isr, _ := partition.ISR() + + fmt.Fprintf(buffer, "\tPartition: %d\tReplicas: %v\tLeader: %d\tISR: %v\n", partition.ID, partition.Replicas, leader, isr) + } + + l.Lock() + stdout[i] = buffer.String() + l.Unlock() + }(i, topic) + } + + wg.Wait() + for _, msg := range stdout { + fmt.Print(msg) + } +} + +func printUsageErrorAndExit(format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Available command line options:") + flag.PrintDefaults() + os.Exit(64) +} + +func printErrorAndExit(code int, format string, values ...interface{}) { + fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) + fmt.Fprintln(os.Stderr) + os.Exit(code) +} diff --git a/vendor/github.com/krallistic/kazoo-go/topic_admin.go b/vendor/github.com/krallistic/kazoo-go/topic_admin.go new file mode 100644 index 00000000..8d084adc --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/topic_admin.go @@ -0,0 +1,205 @@ +package kazoo + +import ( + "errors" + "fmt" + "time" + + "github.com/samuel/go-zookeeper/zk" +) + +var ( + ErrTopicExists = errors.New("Topic already exists") + ErrTopicMarkedForDelete = errors.New("Topic is already marked for deletion") + ErrDeletionTimedOut = errors.New("Timed out while waiting for a topic to be deleted") +) + +// CreateTopic creates a new kafka topic with the specified parameters and properties +func (kz *Kazoo) CreateTopic(name string, partitionCount int, replicationFactor int, topicConfig map[string]string) error { + topic := kz.Topic(name) + + // Official kafka sdk checks if topic exists, then always writes the config unconditionally + // but only writes the partition map if ones does not exist. 
+ exists, err := topic.Exists() + if err != nil { + return err + } else if exists { + return ErrTopicExists + } + + brokerList, err := kz.brokerIDList() + if err != nil { + return err + } + + partitionList, err := topic.generatePartitionAssignments(brokerList, partitionCount, replicationFactor) + if err != nil { + return err + } + + configData, err := topic.marshalConfig(topicConfig) + if err != nil { + return err + } + + partitionData, err := topic.marshalPartitions(partitionList) + if err != nil { + return err + } + + if err = kz.createOrUpdate(topic.configPath(), configData, false); err != nil { + return err + } + + if err = kz.create(topic.metadataPath(), partitionData, false); err != nil { + return err + } + + return nil +} + +func cycleBrokers(broker []int32, lastIter int32) int32 { + if int(lastIter+1) == len(broker) { + return 0 + } else { + return lastIter + 1 + } +} + +func contains(slice []int32, value int32) bool{ + for _,v := range slice { + if v == value { + return true + } + } + return false +} + +func getValidBroker(brokerList []int32, removalBrokers []int32) []int32 { + var retVal []int32 + for _, broker := range brokerList { + if !contains(removalBrokers, broker) { + retVal = append(retVal, broker) + } + } + + return retVal +} + +func (kz *Kazoo) RemoveTopicFromBrokers(name string, removalBroker []int32) error { + topic := kz.Topic(name) + var roundRobinBroker int32 + + brokers, err := kz.brokerIDList() + validBrokers := getValidBroker(brokers, removalBroker) + + + currentPartitions, err := topic.Partitions() + if err != nil { + return err + } + for _, partition := range currentPartitions { + replicas := partition.Replicas + newReplicas := make([]int32, len(replicas)) + for i, replica := range replicas { + + if contains(removalBroker, replica) { + //reassign Broker via RoundRobin. + newReplicas[i] = roundRobinBroker + roundRobinBroker = cycleBrokers(validBrokers, roundRobinBroker) + } else { + newReplicas[i] = replica + } + partition.Replicas = newReplicas + } + } + + if err = topic.validatePartitionAssignments(validBrokers, currentPartitions); err != nil { + fmt.Println("Error Validation PartitionAssignment:", err) + return err + } + + partitionData, err := topic.marshalPartitions(currentPartitions) + if err != nil { + fmt.Println(err) + return err + } + + if err = kz.createOrUpdate(topic.metadataPath(), partitionData, false); err != nil { + fmt.Println(err) + return err + } + + return nil +} + +// DeleteTopic marks a kafka topic for deletion. Deleting a topic is asynchronous and +// DeleteTopic will return before Kafka actually does the deletion. +func (kz *Kazoo) DeleteTopic(name string) error { + node := fmt.Sprintf("%s/admin/delete_topics/%s", kz.conf.Chroot, name) + + exists, err := kz.exists(node) + if err != nil { + return err + } + if exists { + return ErrTopicMarkedForDelete + } + + if err := kz.create(node, nil, false); err != nil { + return err + } + return nil +} + +// DeleteTopicSync marks a kafka topic for deletion and waits until it is deleted +// before returning. 
+func (kz *Kazoo) DeleteTopicSync(name string, timeout time.Duration) error { + err := kz.DeleteTopic(name) + + if err != nil { + return err + } + + topic := kz.Topic(name) + + if exists, err := topic.Exists(); err != nil { + return err + } else if !exists { + return nil + } + + changes, err := topic.Watch() + + if err != nil { + return nil + } + + if timeout > 0 { + + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case <-timer.C: + return ErrDeletionTimedOut + + case c := <-changes: + if c.Type == zk.EventNodeDeleted { + return nil + } + } + } + + } else { + for { + select { + case c := <-changes: + if c.Type == zk.EventNodeDeleted { + return nil + } + } + } + } +} diff --git a/vendor/github.com/krallistic/kazoo-go/topic_metadata.go b/vendor/github.com/krallistic/kazoo-go/topic_metadata.go new file mode 100644 index 00000000..66a6b0c4 --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/topic_metadata.go @@ -0,0 +1,413 @@ +package kazoo + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + "strconv" + + "github.com/samuel/go-zookeeper/zk" +) + +var ( + ErrInvalidPartitionCount = errors.New("Number of partitions must be larger than 0") + ErrInvalidReplicationFactor = errors.New("Replication factor must be between 1 and the number of brokers") + ErrInvalidReplicaCount = errors.New("All partitions must have the same number of replicas") + ErrReplicaBrokerOverlap = errors.New("All replicas for a partition must be on separate brokers") + ErrInvalidBroker = errors.New("Replica assigned to invalid broker") + ErrMissingPartitionID = errors.New("Partition ids must be sequential starting from 0") + ErrDuplicatePartitionID = errors.New("Each partition must have a unique ID") +) + +// Topic interacts with Kafka's topic metadata in Zookeeper. +type Topic struct { + Name string + kz *Kazoo +} + +// TopicList is a type that implements the sortable interface for a list of Topic instances. +type TopicList []*Topic + +// Partition interacts with Kafka's partition metadata in Zookeeper. +type Partition struct { + topic *Topic + ID int32 + Replicas []int32 +} + +// PartitionList is a type that implements the sortable interface for a list of Partition instances +type PartitionList []*Partition + +// Topics returns a list of all registered Kafka topics. +func (kz *Kazoo) Topics() (TopicList, error) { + root := fmt.Sprintf("%s/brokers/topics", kz.conf.Chroot) + children, _, err := kz.conn.Children(root) + if err != nil { + return nil, err + } + + result := make(TopicList, 0, len(children)) + for _, name := range children { + result = append(result, kz.Topic(name)) + } + return result, nil +} + +// WatchTopics returns a list of all registered Kafka topics, and +// watches that list for changes. +func (kz *Kazoo) WatchTopics() (TopicList, <-chan zk.Event, error) { + root := fmt.Sprintf("%s/brokers/topics", kz.conf.Chroot) + children, _, c, err := kz.conn.ChildrenW(root) + if err != nil { + return nil, nil, err + } + + result := make(TopicList, 0, len(children)) + for _, name := range children { + result = append(result, kz.Topic(name)) + } + return result, c, nil +} + +// Topic returns a Topic instance for a given topic name +func (kz *Kazoo) Topic(topic string) *Topic { + return &Topic{Name: topic, kz: kz} +} + +// Exists returns true if the topic exists on the Kafka cluster. +func (t *Topic) Exists() (bool, error) { + return t.kz.exists(t.metadataPath()) +} + +// Partitions returns a list of all partitions for the topic. 
+func (t *Topic) Partitions() (PartitionList, error) { + value, _, err := t.kz.conn.Get(t.metadataPath()) + if err != nil { + return nil, err + } + + return t.parsePartitions(value) +} + +// WatchPartitions returns a list of all partitions for the topic, and watches the topic for changes. +func (t *Topic) WatchPartitions() (PartitionList, <-chan zk.Event, error) { + value, _, c, err := t.kz.conn.GetW(t.metadataPath()) + if err != nil { + return nil, nil, err + } + + list, err := t.parsePartitions(value) + return list, c, err +} + +// Watch watches the topic for changes. +func (t *Topic) Watch() (<-chan zk.Event, error) { + _, _, c, err := t.kz.conn.GetW(t.metadataPath()) + if err != nil { + return nil, err + } + + return c, err +} + +type topicMetadata struct { + Version int `json:"version"` + Partitions map[string][]int32 `json:"partitions"` +} + +func (t *Topic) metadataPath() string { + return fmt.Sprintf("%s/brokers/topics/%s", t.kz.conf.Chroot, t.Name) +} + +// parsePartitions parses the JSON representation of the partitions +// that is stored as data on the topic node in Zookeeper. +func (t *Topic) parsePartitions(value []byte) (PartitionList, error) { + var tm topicMetadata + if err := json.Unmarshal(value, &tm); err != nil { + return nil, err + } + + result := make(PartitionList, len(tm.Partitions)) + for partitionNumber, replicas := range tm.Partitions { + partitionID, err := strconv.ParseInt(partitionNumber, 10, 32) + if err != nil { + return nil, err + } + + replicaIDs := make([]int32, 0, len(replicas)) + for _, r := range replicas { + replicaIDs = append(replicaIDs, int32(r)) + } + result[partitionID] = t.Partition(int32(partitionID), replicaIDs) + } + + return result, nil +} + +// marshalPartitions turns a PartitionList into the JSON representation +// to be stored in Zookeeper. +func (t *Topic) marshalPartitions(partitions PartitionList) ([]byte, error) { + tm := topicMetadata{Version: 1, Partitions: make(map[string][]int32, len(partitions))} + for _, part := range partitions { + tm.Partitions[fmt.Sprintf("%d", part.ID)] = part.Replicas + } + return json.Marshal(tm) +} + +// generatePartitionAssignments creates a partition list for a topic. The primary replica for +// each partition is assigned in a round-robin fashion starting at a random broker. 
+// Additional replicas are assigned to subsequent brokers to ensure there is no overlap +func (t *Topic) generatePartitionAssignments(brokers []int32, partitionCount int, replicationFactor int) (PartitionList, error) { + if partitionCount <= 0 { + return nil, ErrInvalidPartitionCount + } + if replicationFactor <= 0 || len(brokers) < replicationFactor { + return nil, ErrInvalidReplicationFactor + } + + result := make(PartitionList, partitionCount) + + brokerCount := len(brokers) + brokerIdx := rand.Intn(brokerCount) + + for p := 0; p < partitionCount; p++ { + partition := &Partition{topic: t, ID: int32(p), Replicas: make([]int32, replicationFactor)} + + brokerIndices := rand.Perm(len(brokers))[0:replicationFactor] + + for r := 0; r < replicationFactor; r++ { + partition.Replicas[r] = brokers[brokerIndices[r]] + } + + result[p] = partition + brokerIdx = (brokerIdx + 1) % brokerCount + } + + return result, nil +} + +// validatePartitionAssignments ensures that all partitions are assigned to valid brokers, +// have the same number of replicas, and each replica is assigned to a unique broker +func (t *Topic) validatePartitionAssignments(brokers []int32, assignment PartitionList) error { + if len(assignment) == 0 { + return ErrInvalidPartitionCount + } + + // get the first replica count to compare against. Every partition should have the same. + var replicaCount int + for _, part := range assignment { + replicaCount = len(part.Replicas) + break + } + if replicaCount == 0 { + return ErrInvalidReplicationFactor + } + + // ensure all ids are unique and sequential + maxPartitionID := int32(-1) + partitionIDmap := make(map[int32]struct{}, len(assignment)) + + for _, part := range assignment { + if part == nil { + continue + } + if maxPartitionID < part.ID { + maxPartitionID = part.ID + } + partitionIDmap[part.ID] = struct{}{} + + // all partitions require the same replica count + if len(part.Replicas) != replicaCount { + return ErrInvalidReplicaCount + } + + rset := make(map[int32]struct{}, replicaCount) + for _, r := range part.Replicas { + // replica must be assigned to a valid broker + found := false + for _, b := range brokers { + if r == b { + found = true + break + } + } + if !found { + return ErrInvalidBroker + } + rset[r] = struct{}{} + } + // broker assignments for a partition must be unique + if len(rset) != replicaCount { + return ErrReplicaBrokerOverlap + } + } + + // ensure all partitions accounted for + if int(maxPartitionID) != len(assignment)-1 { + return ErrMissingPartitionID + } + + // ensure no duplicate ids + if len(partitionIDmap) != len(assignment) { + return ErrDuplicatePartitionID + } + + return nil +} + +// Partition returns a Partition instance for the topic. 
+func (t *Topic) Partition(id int32, replicas []int32) *Partition { + return &Partition{ID: id, Replicas: replicas, topic: t} +} + +type topicConfig struct { + Version int `json:"version"` + ConfigMap map[string]string `json:"config"` +} + +// getConfigPath returns the zk node path for a topic's config +func (t *Topic) configPath() string { + return fmt.Sprintf("%s/config/topics/%s", t.kz.conf.Chroot, t.Name) +} + +// parseConfig parses the json representation of a topic config +// and returns the configuration values +func (t *Topic) parseConfig(data []byte) (map[string]string, error) { + var cfg topicConfig + if err := json.Unmarshal(data, &cfg); err != nil { + return nil, err + } + return cfg.ConfigMap, nil +} + +// marshalConfig turns a config map into the json representation +// needed for Zookeeper +func (t *Topic) marshalConfig(data map[string]string) ([]byte, error) { + cfg := topicConfig{Version: 1, ConfigMap: data} + if cfg.ConfigMap == nil { + cfg.ConfigMap = make(map[string]string) + } + return json.Marshal(&cfg) +} + +// Config returns topic-level configuration settings as a map. +func (t *Topic) Config() (map[string]string, error) { + value, _, err := t.kz.conn.Get(t.configPath()) + if err != nil { + return nil, err + } + + return t.parseConfig(value) +} + +// Topic returns the Topic of this partition. +func (p *Partition) Topic() *Topic { + return p.topic +} + +// Key returns a unique identifier for the partition, using the form "topic/partition". +func (p *Partition) Key() string { + return fmt.Sprintf("%s/%d", p.topic.Name, p.ID) +} + +// PreferredReplica returns the preferred replica for this partition. +func (p *Partition) PreferredReplica() int32 { + if len(p.Replicas) > 0 { + return p.Replicas[0] + } else { + return -1 + } +} + +// Leader returns the broker ID of the broker that is currently the leader for the partition. +func (p *Partition) Leader() (int32, error) { + if state, err := p.state(); err != nil { + return -1, err + } else { + return state.Leader, nil + } +} + +// ISR returns the broker IDs of the current in-sync replica set for the partition +func (p *Partition) ISR() ([]int32, error) { + if state, err := p.state(); err != nil { + return nil, err + } else { + return state.ISR, nil + } +} + +func (p *Partition) UnderReplicated() (bool, error) { + if state, err := p.state(); err != nil { + return false, err + } else { + return len(state.ISR) < len(p.Replicas), nil + } +} + +func (p *Partition) UsesPreferredReplica() (bool, error) { + if state, err := p.state(); err != nil { + return false, err + } else { + return len(state.ISR) > 0 && state.ISR[0] == p.Replicas[0], nil + } +} + +// partitionState represents the partition state as it is stored as JSON +// in Zookeeper on the partition's state node. +type partitionState struct { + Leader int32 `json:"leader"` + ISR []int32 `json:"isr"` +} + +// state retrieves and parses the partition State +func (p *Partition) state() (partitionState, error) { + var state partitionState + node := fmt.Sprintf("%s/brokers/topics/%s/partitions/%d/state", p.topic.kz.conf.Chroot, p.topic.Name, p.ID) + value, _, err := p.topic.kz.conn.Get(node) + if err != nil { + return state, err + } + + if err := json.Unmarshal(value, &state); err != nil { + return state, err + } + + return state, nil +} + +// Find returns the topic with the given name if it exists in the topic list, +// and will return `nil` otherwise. 
+func (tl TopicList) Find(name string) *Topic { + for _, topic := range tl { + if topic.Name == name { + return topic + } + } + return nil +} + +func (tl TopicList) Len() int { + return len(tl) +} + +func (tl TopicList) Less(i, j int) bool { + return tl[i].Name < tl[j].Name +} + +func (tl TopicList) Swap(i, j int) { + tl[i], tl[j] = tl[j], tl[i] +} + +func (pl PartitionList) Len() int { + return len(pl) +} + +func (pl PartitionList) Less(i, j int) bool { + return pl[i].topic.Name < pl[j].topic.Name || (pl[i].topic.Name == pl[j].topic.Name && pl[i].ID < pl[j].ID) +} + +func (pl PartitionList) Swap(i, j int) { + pl[i], pl[j] = pl[j], pl[i] +} diff --git a/vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go b/vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go new file mode 100644 index 00000000..72a3f00c --- /dev/null +++ b/vendor/github.com/krallistic/kazoo-go/topic_metadata_test.go @@ -0,0 +1,210 @@ +package kazoo + +import ( + "sort" + "testing" +) + +func TestPartition(t *testing.T) { + topic := &Topic{Name: "test"} + partition := topic.Partition(1, []int32{1, 2, 3}) + + if key := partition.Key(); key != "test/1" { + t.Error("Unexpected partition key", key) + } + + if partition.Topic() != topic { + t.Error("Expected Topic() to return the topic the partition was created from.") + } + + if pr := partition.PreferredReplica(); pr != 1 { + t.Error("Expected 1 to be the preferred replica, but found", pr) + } + + partitionWithoutReplicas := topic.Partition(1, nil) + if pr := partitionWithoutReplicas.PreferredReplica(); pr != -1 { + t.Error("Expected -1 to be returned if the partition does not have replicas, but found", pr) + } +} + +func TestTopicList(t *testing.T) { + topics := TopicList{ + &Topic{Name: "foo"}, + &Topic{Name: "bar"}, + &Topic{Name: "baz"}, + } + + sort.Sort(topics) + + if topics[0].Name != "bar" || topics[1].Name != "baz" || topics[2].Name != "foo" { + t.Error("Unexpected order after sorting topic list", topics) + } + + topic := topics.Find("foo") + if topic != topics[2] { + t.Error("Should have found foo topic from the list") + } +} + +func TestPartitionList(t *testing.T) { + var ( + topic1 = &Topic{Name: "1"} + topic2 = &Topic{Name: "2"} + ) + + var ( + partition21 = topic2.Partition(1, nil) + partition12 = topic1.Partition(2, nil) + partition11 = topic1.Partition(1, nil) + ) + + partitions := PartitionList{partition21, partition12, partition11} + sort.Sort(partitions) + + if partitions[0] != partition11 || partitions[1] != partition12 || partitions[2] != partition21 { + t.Error("Unexpected order after sorting topic list", partitions) + } +} + +func TestGeneratePartitionAssignments(t *testing.T) { + // check for errors + tests := []struct { + brokers []int32 + partitionCount int + replicationFactor int + err error + }{ + {[]int32{1, 2}, -1, 1, ErrInvalidPartitionCount}, + {[]int32{1, 2}, 0, 1, ErrInvalidPartitionCount}, + {[]int32{}, 1, 1, ErrInvalidReplicationFactor}, + {[]int32{1, 2}, 1, -1, ErrInvalidReplicationFactor}, + {[]int32{1, 2}, 2, 0, ErrInvalidReplicationFactor}, + {[]int32{1, 2}, 3, 3, ErrInvalidReplicationFactor}, + {[]int32{1, 2}, 2, 1, nil}, + {[]int32{1, 2}, 10, 2, nil}, + {[]int32{1}, 10, 1, nil}, + {[]int32{1, 2, 3, 4, 5}, 1, 1, nil}, + {[]int32{1, 2, 3, 4, 5}, 1, 3, nil}, + {[]int32{1, 2, 3, 4, 5}, 10, 2, nil}, + } + + for testIdx, test := range tests { + topic := &Topic{Name: "t"} + + res, err := topic.generatePartitionAssignments(test.brokers, test.partitionCount, test.replicationFactor) + if err != test.err { + t.Errorf("Incorrect 
error for test %d. Expected (%v) got (%v)", testIdx, test.err, err) + } else if err == nil { + // proper number of paritions + if len(res) != test.partitionCount { + t.Errorf("Wrong number of partitions assigned in test %d. Expected %d got %d", testIdx, test.partitionCount, len(res)) + } + // ensure all petitions are assigned and that they have + // the right number of non-overlapping brokers + for i, part := range res { + if part == nil { + t.Errorf("Partition %d is nil in test %d", i, testIdx) + continue + } + if len(part.Replicas) != test.replicationFactor { + t.Errorf("Partition %d does not have the correct number of brokers in test %d. Expected %d got %d", i, testIdx, test.replicationFactor, len(part.Replicas)) + } + replicaMap := make(map[int32]bool, test.replicationFactor) + for _, r := range part.Replicas { + // ensure broker is in initial broker list + found := false + for _, broker := range test.brokers { + if broker == r { + found = true + break + } + } + if !found { + t.Errorf("Partition %d has an invalid broker id %d in test %d", i, r, testIdx) + } + replicaMap[r] = true + } + if len(replicaMap) != len(part.Replicas) { + t.Errorf("Partition %d has overlapping broker assignments (%v) in test %d", i, part.Replicas, testIdx) + } + } + } + } +} + +func TestValidatePartitionAssignments(t *testing.T) { + // check for errors + tests := []struct { + brokers []int32 + partitions PartitionList + err error + }{ + {[]int32{1}, PartitionList{}, ErrInvalidPartitionCount}, + + {[]int32{1}, PartitionList{ + {ID: 0, Replicas: []int32{}}, + }, ErrInvalidReplicationFactor}, + + {[]int32{1, 2}, PartitionList{ + {ID: 0, Replicas: []int32{1}}, + {ID: 1, Replicas: []int32{1, 2}}, + }, ErrInvalidReplicaCount}, + + {[]int32{1, 2}, PartitionList{ + {ID: 0, Replicas: []int32{1, 2}}, + {ID: 1, Replicas: []int32{1}}, + }, ErrInvalidReplicaCount}, + + {[]int32{1, 2}, PartitionList{ + {ID: 0, Replicas: []int32{1, 2}}, + {ID: 1, Replicas: []int32{2, 2}}, + }, ErrReplicaBrokerOverlap}, + + {[]int32{1, 2}, PartitionList{ + {ID: 0, Replicas: []int32{1, 3}}, + {ID: 1, Replicas: []int32{2, 1}}, + }, ErrInvalidBroker}, + + {[]int32{1, 2, 3}, PartitionList{ + {ID: 1, Replicas: []int32{1, 3}}, + {ID: 2, Replicas: []int32{2, 1}}, + }, ErrMissingPartitionID}, + + {[]int32{1, 2, 3}, PartitionList{ + {ID: 0, Replicas: []int32{1, 3}}, + {ID: 2, Replicas: []int32{2, 1}}, + }, ErrMissingPartitionID}, + + {[]int32{1, 2, 3}, PartitionList{ + {ID: 0, Replicas: []int32{1, 3}}, + {ID: 0, Replicas: []int32{1, 3}}, + {ID: 2, Replicas: []int32{2, 1}}, + }, ErrDuplicatePartitionID}, + + {[]int32{1}, PartitionList{ + {ID: 0, Replicas: []int32{1}}, + }, nil}, + + {[]int32{1}, PartitionList{ + {ID: 0, Replicas: []int32{1}}, + {ID: 1, Replicas: []int32{1}}, + {ID: 2, Replicas: []int32{1}}, + }, nil}, + + {[]int32{1, 2, 3}, PartitionList{ + {ID: 0, Replicas: []int32{1, 2}}, + {ID: 1, Replicas: []int32{2, 3}}, + {ID: 2, Replicas: []int32{3, 1}}, + }, nil}, + } + + for testIdx, test := range tests { + topic := &Topic{Name: "t"} + + err := topic.validatePartitionAssignments(test.brokers, test.partitions) + if err != test.err { + t.Errorf("Incorrect error for test %d. 
Expected (%v) got (%v)", testIdx, test.err, err) + } else if err == nil { + } + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/.gitignore b/vendor/github.com/samuel/go-zookeeper/.gitignore new file mode 100644 index 00000000..e43b0f98 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/samuel/go-zookeeper/.travis.yml b/vendor/github.com/samuel/go-zookeeper/.travis.yml new file mode 100644 index 00000000..65b27a84 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/.travis.yml @@ -0,0 +1,33 @@ +language: go +go: + - 1.9 + +jdk: + - oraclejdk9 + +sudo: false + +branches: + only: + - master + +before_install: + - wget http://apache.cs.utah.edu/zookeeper/zookeeper-${zk_version}/zookeeper-${zk_version}.tar.gz + - tar -zxvf zookeeper*tar.gz && zip -d zookeeper-${zk_version}/contrib/fatjar/zookeeper-${zk_version}-fatjar.jar 'META-INF/*.SF' 'META-INF/*.DSA' + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +script: + - jdk_switcher use oraclejdk9 + - go build ./... + - go fmt ./... + - go vet ./... + - go test -i -race ./... + - go test -race -covermode atomic -coverprofile=profile.cov ./zk + - goveralls -coverprofile=profile.cov -service=travis-ci + +env: + global: + secure: Coha3DDcXmsekrHCZlKvRAc+pMBaQU1QS/3++3YCCUXVDBWgVsC1ZIc9df4RLdZ/ncGd86eoRq/S+zyn1XbnqK5+ePqwJoUnJ59BE8ZyHLWI9ajVn3fND1MTduu/ksGsS79+IYbdVI5wgjSgjD3Ktp6Y5uPl+BPosjYBGdNcHS4= + matrix: + - zk_version=3.4.10 diff --git a/vendor/github.com/samuel/go-zookeeper/LICENSE b/vendor/github.com/samuel/go-zookeeper/LICENSE new file mode 100644 index 00000000..bc00498c --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Samuel Stauffer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
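Reviewer note: the following is a minimal, hypothetical usage sketch of the vendored kazoo-go API added above (NewKazoo, Topic(...).Partitions(), Consumergroup(...).FetchOffset()). The ZooKeeper address, topic name, and group name are placeholders, and it assumes the consumer group commits its offsets to ZooKeeper; it is illustrative only and not part of this commit.

package main

import (
	"fmt"
	"log"

	kazoo "github.com/krallistic/kazoo-go"
)

func main() {
	// Connect using the default config (1s session timeout, no chroot).
	// "localhost:2181" is a placeholder address.
	kz, err := kazoo.NewKazoo([]string{"localhost:2181"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer kz.Close()

	// List the partitions of a (hypothetical) topic registered in ZooKeeper.
	partitions, err := kz.Topic("my-topic").Partitions()
	if err != nil {
		log.Fatal(err)
	}

	// Read the offset a (hypothetical) consumer group has committed to
	// ZooKeeper for each partition; a negative value means no commit yet.
	cg := kz.Consumergroup("my-group")
	for _, p := range partitions {
		offset, err := cg.FetchOffset("my-topic", p.ID)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("partition %d: committed offset %d\n", p.ID, offset)
	}
}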
diff --git a/vendor/github.com/samuel/go-zookeeper/README.md b/vendor/github.com/samuel/go-zookeeper/README.md new file mode 100644 index 00000000..afc1d083 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/README.md @@ -0,0 +1,11 @@ +Native Go Zookeeper Client Library +=================================== + +[![GoDoc](https://godoc.org/github.com/samuel/go-zookeeper?status.svg)](https://godoc.org/github.com/samuel/go-zookeeper) +[![Build Status](https://travis-ci.org/samuel/go-zookeeper.png)](https://travis-ci.org/samuel/go-zookeeper) +[![Coverage Status](https://coveralls.io/repos/github/samuel/go-zookeeper/badge.svg?branch=master)](https://coveralls.io/github/samuel/go-zookeeper?branch=master) + +License +------- + +3-clause BSD. See LICENSE file. diff --git a/vendor/github.com/samuel/go-zookeeper/examples/basic.go b/vendor/github.com/samuel/go-zookeeper/examples/basic.go new file mode 100644 index 00000000..28dfa633 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/examples/basic.go @@ -0,0 +1,22 @@ +package main + +import ( + "fmt" + "time" + + "github.com/samuel/go-zookeeper/zk" +) + +func main() { + c, _, err := zk.Connect([]string{"127.0.0.1"}, time.Second) //*10) + if err != nil { + panic(err) + } + children, stat, ch, err := c.ChildrenW("/") + if err != nil { + panic(err) + } + fmt.Printf("%+v %+v\n", children, stat) + e := <-ch + fmt.Printf("%+v\n", e) +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/cluster_test.go b/vendor/github.com/samuel/go-zookeeper/zk/cluster_test.go new file mode 100644 index 00000000..dcceaa46 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/cluster_test.go @@ -0,0 +1,314 @@ +package zk + +import ( + "sync" + "testing" + "time" +) + +type logWriter struct { + t *testing.T + p string +} + +func (lw logWriter) Write(b []byte) (int, error) { + lw.t.Logf("%s%s", lw.p, string(b)) + return len(b), nil +} + +func TestBasicCluster(t *testing.T) { + ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk1, err := ts.Connect(0) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk1.Close() + zk2, err := ts.Connect(1) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk2.Close() + + time.Sleep(time.Second * 5) + + if _, err := zk1.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create failed on node 1: %+v", err) + } + if by, _, err := zk2.Get("/gozk-test"); err != nil { + t.Fatalf("Get failed on node 2: %+v", err) + } else if string(by) != "foo-cluster" { + t.Fatal("Wrong data for node 2") + } +} + +// If the current leader dies, then the session is reestablished with the new one. 
+func TestClientClusterFailover(t *testing.T) { + tc, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer tc.Stop() + zk, evCh, err := tc.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + sl := NewStateLogger(evCh) + + hasSessionEvent1 := sl.NewWatcher(sessionStateMatcher(StateHasSession)).Wait(8 * time.Second) + if hasSessionEvent1 == nil { + t.Fatalf("Failed to connect and get session") + } + + if _, err := zk.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create failed on node 1: %+v", err) + } + + hasSessionWatcher2 := sl.NewWatcher(sessionStateMatcher(StateHasSession)) + + // Kill the current leader + tc.StopServer(hasSessionEvent1.Server) + + // Wait for the session to be reconnected with the new leader. + if hasSessionWatcher2.Wait(8*time.Second) == nil { + t.Fatalf("Failover failed") + } + + if by, _, err := zk.Get("/gozk-test"); err != nil { + t.Fatalf("Get failed on node 2: %+v", err) + } else if string(by) != "foo-cluster" { + t.Fatal("Wrong data for node 2") + } +} + +// If a ZooKeeper cluster looses quorum then a session is reconnected as soon +// as the quorum is restored. +func TestNoQuorum(t *testing.T) { + tc, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer tc.Stop() + zk, evCh, err := tc.ConnectAllTimeout(4 * time.Second) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + sl := NewStateLogger(evCh) + + // Wait for initial session to be established + hasSessionEvent1 := sl.NewWatcher(sessionStateMatcher(StateHasSession)).Wait(8 * time.Second) + if hasSessionEvent1 == nil { + t.Fatalf("Failed to connect and get session") + } + initialSessionID := zk.sessionID + DefaultLogger.Printf(" Session established: id=%d, timeout=%d", zk.sessionID, zk.sessionTimeoutMs) + + // Kill the ZooKeeper leader and wait for the session to reconnect. + DefaultLogger.Printf(" Kill the leader") + disconnectWatcher1 := sl.NewWatcher(sessionStateMatcher(StateDisconnected)) + hasSessionWatcher2 := sl.NewWatcher(sessionStateMatcher(StateHasSession)) + tc.StopServer(hasSessionEvent1.Server) + + disconnectedEvent1 := disconnectWatcher1.Wait(8 * time.Second) + if disconnectedEvent1 == nil { + t.Fatalf("Failover failed, missed StateDisconnected event") + } + if disconnectedEvent1.Server != hasSessionEvent1.Server { + t.Fatalf("Unexpected StateDisconnected event, expected=%s, actual=%s", + hasSessionEvent1.Server, disconnectedEvent1.Server) + } + + hasSessionEvent2 := hasSessionWatcher2.Wait(8 * time.Second) + if hasSessionEvent2 == nil { + t.Fatalf("Failover failed, missed StateHasSession event") + } + + // Kill the ZooKeeper leader leaving the cluster without quorum. + DefaultLogger.Printf(" Kill the leader") + disconnectWatcher2 := sl.NewWatcher(sessionStateMatcher(StateDisconnected)) + tc.StopServer(hasSessionEvent2.Server) + + disconnectedEvent2 := disconnectWatcher2.Wait(8 * time.Second) + if disconnectedEvent2 == nil { + t.Fatalf("Failover failed, missed StateDisconnected event") + } + if disconnectedEvent2.Server != hasSessionEvent2.Server { + t.Fatalf("Unexpected StateDisconnected event, expected=%s, actual=%s", + hasSessionEvent2.Server, disconnectedEvent2.Server) + } + + // Make sure that we keep retrying connecting to the only remaining + // ZooKeeper server, but the attempts are being dropped because there is + // no quorum. 
+ DefaultLogger.Printf(" Retrying no luck...") + var firstDisconnect *Event + begin := time.Now() + for time.Now().Sub(begin) < 6*time.Second { + disconnectedEvent := sl.NewWatcher(sessionStateMatcher(StateDisconnected)).Wait(4 * time.Second) + if disconnectedEvent == nil { + t.Fatalf("Disconnected event expected") + } + if firstDisconnect == nil { + firstDisconnect = disconnectedEvent + continue + } + if disconnectedEvent.Server != firstDisconnect.Server { + t.Fatalf("Disconnect from wrong server: expected=%s, actual=%s", + firstDisconnect.Server, disconnectedEvent.Server) + } + } + + // Start a ZooKeeper node to restore quorum. + hasSessionWatcher3 := sl.NewWatcher(sessionStateMatcher(StateHasSession)) + tc.StartServer(hasSessionEvent1.Server) + + // Make sure that session is reconnected with the same ID. + hasSessionEvent3 := hasSessionWatcher3.Wait(8 * time.Second) + if hasSessionEvent3 == nil { + t.Fatalf("Session has not been reconnected") + } + if zk.sessionID != initialSessionID { + t.Fatalf("Wrong session ID: expected=%d, actual=%d", initialSessionID, zk.sessionID) + } + + // Make sure that the session is not dropped soon after reconnect + e := sl.NewWatcher(sessionStateMatcher(StateDisconnected)).Wait(6 * time.Second) + if e != nil { + t.Fatalf("Unexpected disconnect") + } +} + +func TestWaitForClose(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, err := ts.Connect(0) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + timeout := time.After(30 * time.Second) +CONNECTED: + for { + select { + case ev := <-zk.eventChan: + if ev.State == StateConnected { + break CONNECTED + } + case <-timeout: + zk.Close() + t.Fatal("Timeout") + } + } + zk.Close() + for { + select { + case _, ok := <-zk.eventChan: + if !ok { + return + } + case <-timeout: + t.Fatal("Timeout") + } + } +} + +func TestBadSession(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + zk.conn.Close() + time.Sleep(time.Millisecond * 100) + + if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } +} + +type EventLogger struct { + events []Event + watchers []*EventWatcher + lock sync.Mutex + wg sync.WaitGroup +} + +func NewStateLogger(eventCh <-chan Event) *EventLogger { + el := &EventLogger{} + el.wg.Add(1) + go func() { + defer el.wg.Done() + for event := range eventCh { + el.lock.Lock() + for _, sw := range el.watchers { + if !sw.triggered && sw.matcher(event) { + sw.triggered = true + sw.matchCh <- event + } + } + DefaultLogger.Printf(" event received: %v\n", event) + el.events = append(el.events, event) + el.lock.Unlock() + } + }() + return el +} + +func (el *EventLogger) NewWatcher(matcher func(Event) bool) *EventWatcher { + ew := &EventWatcher{matcher: matcher, matchCh: make(chan Event, 1)} + el.lock.Lock() + el.watchers = append(el.watchers, ew) + el.lock.Unlock() + return ew +} + +func (el *EventLogger) Events() []Event { + el.lock.Lock() + transitions := make([]Event, len(el.events)) + copy(transitions, el.events) + el.lock.Unlock() + return transitions +} + +func (el *EventLogger) Wait4Stop() { + 
el.wg.Wait() +} + +type EventWatcher struct { + matcher func(Event) bool + matchCh chan Event + triggered bool +} + +func (ew *EventWatcher) Wait(timeout time.Duration) *Event { + select { + case event := <-ew.matchCh: + return &event + case <-time.After(timeout): + return nil + } +} + +func sessionStateMatcher(s State) func(Event) bool { + return func(e Event) bool { + return e.Type == EventSession && e.State == s + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/samuel/go-zookeeper/zk/conn.go new file mode 100644 index 00000000..f79a51b3 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/conn.go @@ -0,0 +1,1228 @@ +// Package zk is a native Go client library for the ZooKeeper orchestration service. +package zk + +/* +TODO: +* make sure a ping response comes back in a reasonable time + +Possible watcher events: +* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err} +*/ + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// ErrNoServer indicates that an operation cannot be completed +// because attempts to connect to all servers in the list failed. +var ErrNoServer = errors.New("zk: could not connect to a server") + +// ErrInvalidPath indicates that an operation was being attempted on +// an invalid path. (e.g. empty path) +var ErrInvalidPath = errors.New("zk: invalid path") + +// DefaultLogger uses the stdlib log package for logging. +var DefaultLogger Logger = defaultLogger{} + +const ( + bufferSize = 1536 * 1024 + eventChanSize = 6 + sendChanSize = 16 + protectedPrefix = "_c_" +) + +type watchType int + +const ( + watchTypeData = iota + watchTypeExist + watchTypeChild +) + +type watchPathType struct { + path string + wType watchType +} + +type Dialer func(network, address string, timeout time.Duration) (net.Conn, error) + +// Logger is an interface that can be implemented to provide custom log output. +type Logger interface { + Printf(string, ...interface{}) +} + +type authCreds struct { + scheme string + auth []byte +} + +type Conn struct { + lastZxid int64 + sessionID int64 + state State // must be 32-bit aligned + xid uint32 + sessionTimeoutMs int32 // session timeout in milliseconds + passwd []byte + + dialer Dialer + hostProvider HostProvider + serverMu sync.Mutex // protects server + server string // remember the address/port of the current server + conn net.Conn + eventChan chan Event + eventCallback EventCallback // may be nil + shouldQuit chan struct{} + pingInterval time.Duration + recvTimeout time.Duration + connectTimeout time.Duration + maxBufferSize int + + creds []authCreds + credsMu sync.Mutex // protects server + + sendChan chan *request + requests map[int32]*request // Xid -> pending request + requestsLock sync.Mutex + watchers map[watchPathType][]chan Event + watchersLock sync.Mutex + closeChan chan struct{} // channel to tell send loop stop + + // Debug (used by unit tests) + reconnectLatch chan struct{} + setWatchLimit int + setWatchCallback func([]*setWatchesRequest) + // Debug (for recurring re-auth hang) + debugCloseRecvLoop bool + debugReauthDone chan struct{} + + logger Logger + logInfo bool // true if information messages are logged; false if only errors are logged + + buf []byte +} + +// connOption represents a connection option. 
+type connOption func(c *Conn) + +type request struct { + xid int32 + opcode int32 + pkt interface{} + recvStruct interface{} + recvChan chan response + + // Because sending and receiving happen in separate go routines, there's + // a possible race condition when creating watches from outside the read + // loop. We must ensure that a watcher gets added to the list synchronously + // with the response from the server on any request that creates a watch. + // In order to not hard code the watch logic for each opcode in the recv + // loop the caller can use recvFunc to insert some synchronously code + // after a response. + recvFunc func(*request, *responseHeader, error) +} + +type response struct { + zxid int64 + err error +} + +type Event struct { + Type EventType + State State + Path string // For non-session events, the path of the watched node. + Err error + Server string // For connection events +} + +// HostProvider is used to represent a set of hosts a ZooKeeper client should connect to. +// It is an analog of the Java equivalent: +// http://svn.apache.org/viewvc/zookeeper/trunk/src/java/main/org/apache/zookeeper/client/HostProvider.java?view=markup +type HostProvider interface { + // Init is called first, with the servers specified in the connection string. + Init(servers []string) error + // Len returns the number of servers. + Len() int + // Next returns the next server to connect to. retryStart will be true if we've looped through + // all known servers without Connected() being called. + Next() (server string, retryStart bool) + // Notify the HostProvider of a successful connection. + Connected() +} + +// ConnectWithDialer establishes a new connection to a pool of zookeeper servers +// using a custom Dialer. See Connect for further information about session timeout. +// This method is deprecated and provided for compatibility: use the WithDialer option instead. +func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) { + return Connect(servers, sessionTimeout, WithDialer(dialer)) +} + +// Connect establishes a new connection to a pool of zookeeper +// servers. The provided session timeout sets the amount of time for which +// a session is considered valid after losing connection to a server. Within +// the session timeout it's possible to reestablish a connection to a different +// server and keep the same session. This is means any ephemeral nodes and +// watches are maintained. 
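+//
+// A minimal usage sketch (illustrative only; the address and timeout are
+// placeholders and error handling is elided):
+//
+//    conn, events, err := Connect([]string{"127.0.0.1:2181"}, 10*time.Second)
+//    if err != nil {
+//        // handle connection setup failure
+//    }
+//    defer conn.Close()
+//    go func() {
+//        for ev := range events {
+//            _ = ev // observe session and watch events
+//        }
+//    }()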
+func Connect(servers []string, sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) { + if len(servers) == 0 { + return nil, nil, errors.New("zk: server list must not be empty") + } + + srvs := make([]string, len(servers)) + + for i, addr := range servers { + if strings.Contains(addr, ":") { + srvs[i] = addr + } else { + srvs[i] = addr + ":" + strconv.Itoa(DefaultPort) + } + } + + // Randomize the order of the servers to avoid creating hotspots + stringShuffle(srvs) + + ec := make(chan Event, eventChanSize) + conn := &Conn{ + dialer: net.DialTimeout, + hostProvider: &DNSHostProvider{}, + conn: nil, + state: StateDisconnected, + eventChan: ec, + shouldQuit: make(chan struct{}), + connectTimeout: 1 * time.Second, + sendChan: make(chan *request, sendChanSize), + requests: make(map[int32]*request), + watchers: make(map[watchPathType][]chan Event), + passwd: emptyPassword, + logger: DefaultLogger, + logInfo: true, // default is true for backwards compatability + buf: make([]byte, bufferSize), + } + + // Set provided options. + for _, option := range options { + option(conn) + } + + if err := conn.hostProvider.Init(srvs); err != nil { + return nil, nil, err + } + + conn.setTimeouts(int32(sessionTimeout / time.Millisecond)) + + go func() { + conn.loop() + conn.flushRequests(ErrClosing) + conn.invalidateWatches(ErrClosing) + close(conn.eventChan) + }() + return conn, ec, nil +} + +// WithDialer returns a connection option specifying a non-default Dialer. +func WithDialer(dialer Dialer) connOption { + return func(c *Conn) { + c.dialer = dialer + } +} + +// WithHostProvider returns a connection option specifying a non-default HostProvider. +func WithHostProvider(hostProvider HostProvider) connOption { + return func(c *Conn) { + c.hostProvider = hostProvider + } +} + +// WithLogger returns a connection option specifying a non-default Logger +func WithLogger(logger Logger) connOption { + return func(c *Conn) { + c.logger = logger + } +} + +// WithLogInfo returns a connection option specifying whether or not information messages +// shoud be logged. +func WithLogInfo(logInfo bool) connOption { + return func(c *Conn) { + c.logInfo = logInfo + } +} + +// EventCallback is a function that is called when an Event occurs. +type EventCallback func(Event) + +// WithEventCallback returns a connection option that specifies an event +// callback. +// The callback must not block - doing so would delay the ZK go routines. +func WithEventCallback(cb EventCallback) connOption { + return func(c *Conn) { + c.eventCallback = cb + } +} + +// WithMaxBufferSize sets the maximum buffer size used to read and decode +// packets received from the Zookeeper server. The standard Zookeeper client for +// Java defaults to a limit of 1mb. For backwards compatibility, this Go client +// defaults to unbounded unless overridden via this option. A value that is zero +// or negative indicates that no limit is enforced. +// +// This is meant to prevent resource exhaustion in the face of potentially +// malicious data in ZK. It should generally match the server setting (which +// also defaults ot 1mb) so that clients and servers agree on the limits for +// things like the size of data in an individual znode and the total size of a +// transaction. +// +// For production systems, this should be set to a reasonable value (ideally +// that matches the server configuration). For ops tooling, it is handy to use a +// much larger limit, in order to do things like clean-up problematic state in +// the ZK tree. 
For example, if a single znode has a huge number of children, it +// is possible for the response to a "list children" operation to exceed this +// buffer size and cause errors in clients. The only way to subsequently clean +// up the tree (by removing superfluous children) is to use a client configured +// with a larger buffer size that can successfully query for all of the child +// names and then remove them. (Note there are other tools that can list all of +// the child names without an increased buffer size in the client, but they work +// by inspecting the servers' transaction logs to enumerate children instead of +// sending an online request to a server. +func WithMaxBufferSize(maxBufferSize int) connOption { + return func(c *Conn) { + c.maxBufferSize = maxBufferSize + } +} + +// WithMaxConnBufferSize sets maximum buffer size used to send and encode +// packets to Zookeeper server. The standard Zookeepeer client for java defaults +// to a limit of 1mb. This option should be used for non-standard server setup +// where znode is bigger than default 1mb. +func WithMaxConnBufferSize(maxBufferSize int) connOption { + return func(c *Conn) { + c.buf = make([]byte, maxBufferSize) + } +} + +func (c *Conn) Close() { + close(c.shouldQuit) + + select { + case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil): + case <-time.After(time.Second): + } +} + +// State returns the current state of the connection. +func (c *Conn) State() State { + return State(atomic.LoadInt32((*int32)(&c.state))) +} + +// SessionID returns the current session id of the connection. +func (c *Conn) SessionID() int64 { + return atomic.LoadInt64(&c.sessionID) +} + +// SetLogger sets the logger to be used for printing errors. +// Logger is an interface provided by this package. 
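+//
+// Any type with a matching Printf method satisfies Logger; an illustrative
+// sketch, not part of this package (assumes the standard library log package):
+//
+//    type prefixLogger struct{ prefix string }
+//
+//    func (l prefixLogger) Printf(format string, args ...interface{}) {
+//        log.Printf(l.prefix+format, args...)
+//    }
+//
+//    conn.SetLogger(prefixLogger{prefix: "[zk] "})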
+func (c *Conn) SetLogger(l Logger) { + c.logger = l +} + +func (c *Conn) setTimeouts(sessionTimeoutMs int32) { + c.sessionTimeoutMs = sessionTimeoutMs + sessionTimeout := time.Duration(sessionTimeoutMs) * time.Millisecond + c.recvTimeout = sessionTimeout * 2 / 3 + c.pingInterval = c.recvTimeout / 2 +} + +func (c *Conn) setState(state State) { + atomic.StoreInt32((*int32)(&c.state), int32(state)) + c.sendEvent(Event{Type: EventSession, State: state, Server: c.Server()}) +} + +func (c *Conn) sendEvent(evt Event) { + if c.eventCallback != nil { + c.eventCallback(evt) + } + + select { + case c.eventChan <- evt: + default: + // panic("zk: event channel full - it must be monitored and never allowed to be full") + } +} + +func (c *Conn) connect() error { + var retryStart bool + for { + c.serverMu.Lock() + c.server, retryStart = c.hostProvider.Next() + c.serverMu.Unlock() + c.setState(StateConnecting) + if retryStart { + c.flushUnsentRequests(ErrNoServer) + select { + case <-time.After(time.Second): + // pass + case <-c.shouldQuit: + c.setState(StateDisconnected) + c.flushUnsentRequests(ErrClosing) + return ErrClosing + } + } + + zkConn, err := c.dialer("tcp", c.Server(), c.connectTimeout) + if err == nil { + c.conn = zkConn + c.setState(StateConnected) + if c.logInfo { + c.logger.Printf("Connected to %s", c.Server()) + } + return nil + } + + c.logger.Printf("Failed to connect to %s: %+v", c.Server(), err) + } +} + +func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) { + shouldCancel := func() bool { + select { + case <-c.shouldQuit: + return true + case <-c.closeChan: + return true + default: + return false + } + } + + c.credsMu.Lock() + defer c.credsMu.Unlock() + + defer close(reauthReadyChan) + + if c.logInfo { + c.logger.Printf("Re-submitting `%d` credentials after reconnect", + len(c.creds)) + } + + for _, cred := range c.creds { + if shouldCancel() { + c.logger.Printf("Cancel rer-submitting credentials") + return + } + resChan, err := c.sendRequest( + opSetAuth, + &setAuthRequest{Type: 0, + Scheme: cred.scheme, + Auth: cred.auth, + }, + &setAuthResponse{}, + nil) + + if err != nil { + c.logger.Printf("Call to sendRequest failed during credential resubmit: %s", err) + // FIXME(prozlach): lets ignore errors for now + continue + } + + var res response + select { + case res = <-resChan: + case <-c.closeChan: + c.logger.Printf("Recv closed, cancel re-submitting credentials") + return + case <-c.shouldQuit: + c.logger.Printf("Should quit, cancel re-submitting credentials") + return + } + if res.err != nil { + c.logger.Printf("Credential re-submit failed: %s", res.err) + // FIXME(prozlach): lets ignore errors for now + continue + } + } +} + +func (c *Conn) sendRequest( + opcode int32, + req interface{}, + res interface{}, + recvFunc func(*request, *responseHeader, error), +) ( + <-chan response, + error, +) { + rq := &request{ + xid: c.nextXid(), + opcode: opcode, + pkt: req, + recvStruct: res, + recvChan: make(chan response, 1), + recvFunc: recvFunc, + } + + if err := c.sendData(rq); err != nil { + return nil, err + } + + return rq.recvChan, nil +} + +func (c *Conn) loop() { + for { + if err := c.connect(); err != nil { + // c.Close() was called + return + } + + err := c.authenticate() + switch { + case err == ErrSessionExpired: + c.logger.Printf("Authentication failed: %s", err) + c.invalidateWatches(err) + case err != nil && c.conn != nil: + c.logger.Printf("Authentication failed: %s", err) + c.conn.Close() + case err == nil: + if c.logInfo { + c.logger.Printf("Authenticated: id=%d, 
timeout=%d", c.SessionID(), c.sessionTimeoutMs) + } + c.hostProvider.Connected() // mark success + c.closeChan = make(chan struct{}) // channel to tell send loop stop + reauthChan := make(chan struct{}) // channel to tell send loop that authdata has been resubmitted + + var wg sync.WaitGroup + wg.Add(1) + go func() { + <-reauthChan + if c.debugCloseRecvLoop { + close(c.debugReauthDone) + } + err := c.sendLoop() + if err != nil || c.logInfo { + c.logger.Printf("Send loop terminated: err=%v", err) + } + c.conn.Close() // causes recv loop to EOF/exit + wg.Done() + }() + + wg.Add(1) + go func() { + var err error + if c.debugCloseRecvLoop { + err = errors.New("DEBUG: close recv loop") + } else { + err = c.recvLoop(c.conn) + } + if err != io.EOF || c.logInfo { + c.logger.Printf("Recv loop terminated: err=%v", err) + } + if err == nil { + panic("zk: recvLoop should never return nil error") + } + close(c.closeChan) // tell send loop to exit + wg.Done() + }() + + c.resendZkAuth(reauthChan) + + c.sendSetWatches() + wg.Wait() + } + + c.setState(StateDisconnected) + + select { + case <-c.shouldQuit: + c.flushRequests(ErrClosing) + return + default: + } + + if err != ErrSessionExpired { + err = ErrConnectionClosed + } + c.flushRequests(err) + + if c.reconnectLatch != nil { + select { + case <-c.shouldQuit: + return + case <-c.reconnectLatch: + } + } + } +} + +func (c *Conn) flushUnsentRequests(err error) { + for { + select { + default: + return + case req := <-c.sendChan: + req.recvChan <- response{-1, err} + } + } +} + +// Send error to all pending requests and clear request map +func (c *Conn) flushRequests(err error) { + c.requestsLock.Lock() + for _, req := range c.requests { + req.recvChan <- response{-1, err} + } + c.requests = make(map[int32]*request) + c.requestsLock.Unlock() +} + +// Send error to all watchers and clear watchers map +func (c *Conn) invalidateWatches(err error) { + c.watchersLock.Lock() + defer c.watchersLock.Unlock() + + if len(c.watchers) >= 0 { + for pathType, watchers := range c.watchers { + ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err} + for _, ch := range watchers { + ch <- ev + close(ch) + } + } + c.watchers = make(map[watchPathType][]chan Event) + } +} + +func (c *Conn) sendSetWatches() { + c.watchersLock.Lock() + defer c.watchersLock.Unlock() + + if len(c.watchers) == 0 { + return + } + + // NB: A ZK server, by default, rejects packets >1mb. So, if we have too + // many watches to reset, we need to break this up into multiple packets + // to avoid hitting that limit. Mirroring the Java client behavior: we are + // conservative in that we limit requests to 128kb (since server limit is + // is actually configurable and could conceivably be configured smaller + // than default of 1mb). 
+ limit := 128 * 1024 + if c.setWatchLimit > 0 { + limit = c.setWatchLimit + } + + var reqs []*setWatchesRequest + var req *setWatchesRequest + var sizeSoFar int + + n := 0 + for pathType, watchers := range c.watchers { + if len(watchers) == 0 { + continue + } + addlLen := 4 + len(pathType.path) + if req == nil || sizeSoFar+addlLen > limit { + if req != nil { + // add to set of requests that we'll send + reqs = append(reqs, req) + } + sizeSoFar = 28 // fixed overhead of a set-watches packet + req = &setWatchesRequest{ + RelativeZxid: c.lastZxid, + DataWatches: make([]string, 0), + ExistWatches: make([]string, 0), + ChildWatches: make([]string, 0), + } + } + sizeSoFar += addlLen + switch pathType.wType { + case watchTypeData: + req.DataWatches = append(req.DataWatches, pathType.path) + case watchTypeExist: + req.ExistWatches = append(req.ExistWatches, pathType.path) + case watchTypeChild: + req.ChildWatches = append(req.ChildWatches, pathType.path) + } + n++ + } + if n == 0 { + return + } + if req != nil { // don't forget any trailing packet we were building + reqs = append(reqs, req) + } + + if c.setWatchCallback != nil { + c.setWatchCallback(reqs) + } + + go func() { + res := &setWatchesResponse{} + // TODO: Pipeline these so queue all of them up before waiting on any + // response. That will require some investigation to make sure there + // aren't failure modes where a blocking write to the channel of requests + // could hang indefinitely and cause this goroutine to leak... + for _, req := range reqs { + _, err := c.request(opSetWatches, req, res, nil) + if err != nil { + c.logger.Printf("Failed to set previous watches: %s", err.Error()) + break + } + } + }() +} + +func (c *Conn) authenticate() error { + buf := make([]byte, 256) + + // Encode and send a connect request. + n, err := encodePacket(buf[4:], &connectRequest{ + ProtocolVersion: protocolVersion, + LastZxidSeen: c.lastZxid, + TimeOut: c.sessionTimeoutMs, + SessionID: c.SessionID(), + Passwd: c.passwd, + }) + if err != nil { + return err + } + + binary.BigEndian.PutUint32(buf[:4], uint32(n)) + + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)) + _, err = c.conn.Write(buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + return err + } + + // Receive and decode a connect response. 
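+ // Added note: the reply is length-prefixed — a 4-byte big-endian length
+ // followed by the serialized connectResponse payload.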
+ c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)) + _, err = io.ReadFull(c.conn, buf[:4]) + c.conn.SetReadDeadline(time.Time{}) + if err != nil { + return err + } + + blen := int(binary.BigEndian.Uint32(buf[:4])) + if cap(buf) < blen { + buf = make([]byte, blen) + } + + _, err = io.ReadFull(c.conn, buf[:blen]) + if err != nil { + return err + } + + r := connectResponse{} + _, err = decodePacket(buf[:blen], &r) + if err != nil { + return err + } + if r.SessionID == 0 { + atomic.StoreInt64(&c.sessionID, int64(0)) + c.passwd = emptyPassword + c.lastZxid = 0 + c.setState(StateExpired) + return ErrSessionExpired + } + + atomic.StoreInt64(&c.sessionID, r.SessionID) + c.setTimeouts(r.TimeOut) + c.passwd = r.Passwd + c.setState(StateHasSession) + + return nil +} + +func (c *Conn) sendData(req *request) error { + header := &requestHeader{req.xid, req.opcode} + n, err := encodePacket(c.buf[4:], header) + if err != nil { + req.recvChan <- response{-1, err} + return nil + } + + n2, err := encodePacket(c.buf[4+n:], req.pkt) + if err != nil { + req.recvChan <- response{-1, err} + return nil + } + + n += n2 + + binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) + + c.requestsLock.Lock() + select { + case <-c.closeChan: + req.recvChan <- response{-1, ErrConnectionClosed} + c.requestsLock.Unlock() + return ErrConnectionClosed + default: + } + c.requests[req.xid] = req + c.requestsLock.Unlock() + + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) + _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + req.recvChan <- response{-1, err} + c.conn.Close() + return err + } + + return nil +} + +func (c *Conn) sendLoop() error { + pingTicker := time.NewTicker(c.pingInterval) + defer pingTicker.Stop() + + for { + select { + case req := <-c.sendChan: + if err := c.sendData(req); err != nil { + return err + } + case <-pingTicker.C: + n, err := encodePacket(c.buf[4:], &requestHeader{Xid: -2, Opcode: opPing}) + if err != nil { + panic("zk: opPing should never fail to serialize") + } + + binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) + + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) + _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + c.conn.Close() + return err + } + case <-c.closeChan: + return nil + } + } +} + +func (c *Conn) recvLoop(conn net.Conn) error { + sz := bufferSize + if c.maxBufferSize > 0 && sz > c.maxBufferSize { + sz = c.maxBufferSize + } + buf := make([]byte, sz) + for { + // package length + conn.SetReadDeadline(time.Now().Add(c.recvTimeout)) + _, err := io.ReadFull(conn, buf[:4]) + if err != nil { + return err + } + + blen := int(binary.BigEndian.Uint32(buf[:4])) + if cap(buf) < blen { + if c.maxBufferSize > 0 && blen > c.maxBufferSize { + return fmt.Errorf("received packet from server with length %d, which exceeds max buffer size %d", blen, c.maxBufferSize) + } + buf = make([]byte, blen) + } + + _, err = io.ReadFull(conn, buf[:blen]) + conn.SetReadDeadline(time.Time{}) + if err != nil { + return err + } + + res := responseHeader{} + _, err = decodePacket(buf[:16], &res) + if err != nil { + return err + } + + if res.Xid == -1 { + res := &watcherEvent{} + _, err := decodePacket(buf[16:blen], res) + if err != nil { + return err + } + ev := Event{ + Type: res.Type, + State: res.State, + Path: res.Path, + Err: nil, + } + c.sendEvent(ev) + wTypes := make([]watchType, 0, 2) + switch res.Type { + case EventNodeCreated: + wTypes = append(wTypes, watchTypeExist) + case EventNodeDeleted, 
EventNodeDataChanged: + wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild) + case EventNodeChildrenChanged: + wTypes = append(wTypes, watchTypeChild) + } + c.watchersLock.Lock() + for _, t := range wTypes { + wpt := watchPathType{res.Path, t} + if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 { + for _, ch := range watchers { + ch <- ev + close(ch) + } + delete(c.watchers, wpt) + } + } + c.watchersLock.Unlock() + } else if res.Xid == -2 { + // Ping response. Ignore. + } else if res.Xid < 0 { + c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid) + } else { + if res.Zxid > 0 { + c.lastZxid = res.Zxid + } + + c.requestsLock.Lock() + req, ok := c.requests[res.Xid] + if ok { + delete(c.requests, res.Xid) + } + c.requestsLock.Unlock() + + if !ok { + c.logger.Printf("Response for unknown request with xid %d", res.Xid) + } else { + if res.Err != 0 { + err = res.Err.toError() + } else { + _, err = decodePacket(buf[16:blen], req.recvStruct) + } + if req.recvFunc != nil { + req.recvFunc(req, &res, err) + } + req.recvChan <- response{res.Zxid, err} + if req.opcode == opClose { + return io.EOF + } + } + } + } +} + +func (c *Conn) nextXid() int32 { + return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff) +} + +func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event { + c.watchersLock.Lock() + defer c.watchersLock.Unlock() + + ch := make(chan Event, 1) + wpt := watchPathType{path, watchType} + c.watchers[wpt] = append(c.watchers[wpt], ch) + return ch +} + +func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response { + rq := &request{ + xid: c.nextXid(), + opcode: opcode, + pkt: req, + recvStruct: res, + recvChan: make(chan response, 1), + recvFunc: recvFunc, + } + c.sendChan <- rq + return rq.recvChan +} + +func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) { + r := <-c.queueRequest(opcode, req, res, recvFunc) + return r.zxid, r.err +} + +func (c *Conn) AddAuth(scheme string, auth []byte) error { + _, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil) + + if err != nil { + return err + } + + // Remember authdata so that it can be re-submitted on reconnect + // + // FIXME(prozlach): For now we treat "userfoo:passbar" and "userfoo:passbar2" + // as two different entries, which will be re-submitted on reconnet. Some + // research is needed on how ZK treats these cases and + // then maybe switch to something like "map[username] = password" to allow + // only single password for given user with users being unique. 
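+ // Added note: the credentials are appended under credsMu so the slice is safe
+ // for resendZkAuth to read when replaying auth after a reconnect.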
+ obj := authCreds{ + scheme: scheme, + auth: auth, + } + + c.credsMu.Lock() + c.creds = append(c.creds, obj) + c.credsMu.Unlock() + + return nil +} + +func (c *Conn) Children(path string) ([]string, *Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, err + } + + res := &getChildren2Response{} + _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil) + return res.Children, &res.Stat, err +} + +func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, nil, err + } + + var ech <-chan Event + res := &getChildren2Response{} + _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { + if err == nil { + ech = c.addWatcher(path, watchTypeChild) + } + }) + if err != nil { + return nil, nil, nil, err + } + return res.Children, &res.Stat, ech, err +} + +func (c *Conn) Get(path string) ([]byte, *Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, err + } + + res := &getDataResponse{} + _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil) + return res.Data, &res.Stat, err +} + +// GetW returns the contents of a znode and sets a watch +func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, nil, err + } + + var ech <-chan Event + res := &getDataResponse{} + _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { + if err == nil { + ech = c.addWatcher(path, watchTypeData) + } + }) + if err != nil { + return nil, nil, nil, err + } + return res.Data, &res.Stat, ech, err +} + +func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, err + } + + res := &setDataResponse{} + _, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil) + return &res.Stat, err +} + +func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) { + if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil { + return "", err + } + + res := &createResponse{} + _, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil) + return res.Path, err +} + +// CreateProtectedEphemeralSequential fixes a race condition if the server crashes +// after it creates the node. On reconnect the session may still be valid so the +// ephemeral node still exists. Therefore, on reconnect we need to check if a node +// with a GUID generated on create exists. 
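+// The node is created with a name of the form <parent>/_c_<guid>-<name><seq>,
+// which is the pattern the recovery scan below matches against.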
+func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) { + if err := validatePath(path, true); err != nil { + return "", err + } + + var guid [16]byte + _, err := io.ReadFull(rand.Reader, guid[:16]) + if err != nil { + return "", err + } + guidStr := fmt.Sprintf("%x", guid) + + parts := strings.Split(path, "/") + parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1]) + rootPath := strings.Join(parts[:len(parts)-1], "/") + protectedPath := strings.Join(parts, "/") + + var newPath string + for i := 0; i < 3; i++ { + newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl) + switch err { + case ErrSessionExpired: + // No need to search for the node since it can't exist. Just try again. + case ErrConnectionClosed: + children, _, err := c.Children(rootPath) + if err != nil { + return "", err + } + for _, p := range children { + parts := strings.Split(p, "/") + if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) { + if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr { + return rootPath + "/" + p, nil + } + } + } + case nil: + return newPath, nil + default: + return "", err + } + } + return "", err +} + +func (c *Conn) Delete(path string, version int32) error { + if err := validatePath(path, false); err != nil { + return err + } + + _, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil) + return err +} + +func (c *Conn) Exists(path string) (bool, *Stat, error) { + if err := validatePath(path, false); err != nil { + return false, nil, err + } + + res := &existsResponse{} + _, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil) + exists := true + if err == ErrNoNode { + exists = false + err = nil + } + return exists, &res.Stat, err +} + +func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) { + if err := validatePath(path, false); err != nil { + return false, nil, nil, err + } + + var ech <-chan Event + res := &existsResponse{} + _, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { + if err == nil { + ech = c.addWatcher(path, watchTypeData) + } else if err == ErrNoNode { + ech = c.addWatcher(path, watchTypeExist) + } + }) + exists := true + if err == ErrNoNode { + exists = false + err = nil + } + if err != nil { + return false, nil, nil, err + } + return exists, &res.Stat, ech, err +} + +func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, err + } + + res := &getAclResponse{} + _, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil) + return res.Acl, &res.Stat, err +} +func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, err + } + + res := &setAclResponse{} + _, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil) + return &res.Stat, err +} + +func (c *Conn) Sync(path string) (string, error) { + if err := validatePath(path, false); err != nil { + return "", err + } + + res := &syncResponse{} + _, err := c.request(opSync, &syncRequest{Path: path}, res, nil) + return res.Path, err +} + +type MultiResponse struct { + Stat *Stat + String string + Error error +} + +// Multi executes multiple ZooKeeper operations or none of them. 
The provided +// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or +// *CheckVersionRequest. +func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) { + req := &multiRequest{ + Ops: make([]multiRequestOp, 0, len(ops)), + DoneHeader: multiHeader{Type: -1, Done: true, Err: -1}, + } + for _, op := range ops { + var opCode int32 + switch op.(type) { + case *CreateRequest: + opCode = opCreate + case *SetDataRequest: + opCode = opSetData + case *DeleteRequest: + opCode = opDelete + case *CheckVersionRequest: + opCode = opCheck + default: + return nil, fmt.Errorf("unknown operation type %T", op) + } + req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op}) + } + res := &multiResponse{} + _, err := c.request(opMulti, req, res, nil) + mr := make([]MultiResponse, len(res.Ops)) + for i, op := range res.Ops { + mr[i] = MultiResponse{Stat: op.Stat, String: op.String, Error: op.Err.toError()} + } + return mr, err +} + +// Server returns the current or last-connected server name. +func (c *Conn) Server() string { + c.serverMu.Lock() + defer c.serverMu.Unlock() + return c.server +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn_test.go b/vendor/github.com/samuel/go-zookeeper/zk/conn_test.go new file mode 100644 index 00000000..94206d95 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/conn_test.go @@ -0,0 +1,57 @@ +package zk + +import ( + "io/ioutil" + "testing" + "time" +) + +func TestRecurringReAuthHang(t *testing.T) { + t.Skip("Race condition in test") + + sessionTimeout := 2 * time.Second + + finish := make(chan struct{}) + defer close(finish) + go func() { + select { + case <-finish: + return + case <-time.After(5 * sessionTimeout): + panic("expected not hang") + } + }() + + zkC, err := StartTestCluster(2, ioutil.Discard, ioutil.Discard) + if err != nil { + panic(err) + } + defer zkC.Stop() + + conn, evtC, err := zkC.ConnectAll() + if err != nil { + panic(err) + } + for conn.State() != StateHasSession { + time.Sleep(50 * time.Millisecond) + } + + go func() { + for range evtC { + } + }() + + // Add auth. + conn.AddAuth("digest", []byte("test:test")) + + currentServer := conn.Server() + conn.debugCloseRecvLoop = true + conn.debugReauthDone = make(chan struct{}) + zkC.StopServer(currentServer) + // wait connect to new zookeeper. 
+ for conn.Server() == currentServer && conn.State() != StateHasSession { + time.Sleep(100 * time.Millisecond) + } + + <-conn.debugReauthDone +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants.go b/vendor/github.com/samuel/go-zookeeper/zk/constants.go new file mode 100644 index 00000000..33b5563b --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/constants.go @@ -0,0 +1,240 @@ +package zk + +import ( + "errors" +) + +const ( + protocolVersion = 0 + + DefaultPort = 2181 +) + +const ( + opNotify = 0 + opCreate = 1 + opDelete = 2 + opExists = 3 + opGetData = 4 + opSetData = 5 + opGetAcl = 6 + opSetAcl = 7 + opGetChildren = 8 + opSync = 9 + opPing = 11 + opGetChildren2 = 12 + opCheck = 13 + opMulti = 14 + opClose = -11 + opSetAuth = 100 + opSetWatches = 101 + opError = -1 + // Not in protocol, used internally + opWatcherEvent = -2 +) + +const ( + EventNodeCreated EventType = 1 + EventNodeDeleted EventType = 2 + EventNodeDataChanged EventType = 3 + EventNodeChildrenChanged EventType = 4 + + EventSession EventType = -1 + EventNotWatching EventType = -2 +) + +var ( + eventNames = map[EventType]string{ + EventNodeCreated: "EventNodeCreated", + EventNodeDeleted: "EventNodeDeleted", + EventNodeDataChanged: "EventNodeDataChanged", + EventNodeChildrenChanged: "EventNodeChildrenChanged", + EventSession: "EventSession", + EventNotWatching: "EventNotWatching", + } +) + +const ( + StateUnknown State = -1 + StateDisconnected State = 0 + StateConnecting State = 1 + StateAuthFailed State = 4 + StateConnectedReadOnly State = 5 + StateSaslAuthenticated State = 6 + StateExpired State = -112 + + StateConnected = State(100) + StateHasSession = State(101) +) + +const ( + FlagEphemeral = 1 + FlagSequence = 2 +) + +var ( + stateNames = map[State]string{ + StateUnknown: "StateUnknown", + StateDisconnected: "StateDisconnected", + StateConnectedReadOnly: "StateConnectedReadOnly", + StateSaslAuthenticated: "StateSaslAuthenticated", + StateExpired: "StateExpired", + StateAuthFailed: "StateAuthFailed", + StateConnecting: "StateConnecting", + StateConnected: "StateConnected", + StateHasSession: "StateHasSession", + } +) + +type State int32 + +func (s State) String() string { + if name := stateNames[s]; name != "" { + return name + } + return "Unknown" +} + +type ErrCode int32 + +var ( + ErrConnectionClosed = errors.New("zk: connection closed") + ErrUnknown = errors.New("zk: unknown error") + ErrAPIError = errors.New("zk: api error") + ErrNoNode = errors.New("zk: node does not exist") + ErrNoAuth = errors.New("zk: not authenticated") + ErrBadVersion = errors.New("zk: version conflict") + ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children") + ErrNodeExists = errors.New("zk: node already exists") + ErrNotEmpty = errors.New("zk: node has children") + ErrSessionExpired = errors.New("zk: session has been expired by the server") + ErrInvalidACL = errors.New("zk: invalid ACL specified") + ErrAuthFailed = errors.New("zk: client authentication failed") + ErrClosing = errors.New("zk: zookeeper is closing") + ErrNothing = errors.New("zk: no server responsees to process") + ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored") + + // ErrInvalidCallback = errors.New("zk: invalid callback specified") + errCodeToError = map[ErrCode]error{ + 0: nil, + errAPIError: ErrAPIError, + errNoNode: ErrNoNode, + errNoAuth: ErrNoAuth, + errBadVersion: ErrBadVersion, + errNoChildrenForEphemerals: ErrNoChildrenForEphemerals, + errNodeExists: ErrNodeExists, 
+ errNotEmpty: ErrNotEmpty, + errSessionExpired: ErrSessionExpired, + // errInvalidCallback: ErrInvalidCallback, + errInvalidAcl: ErrInvalidACL, + errAuthFailed: ErrAuthFailed, + errClosing: ErrClosing, + errNothing: ErrNothing, + errSessionMoved: ErrSessionMoved, + } +) + +func (e ErrCode) toError() error { + if err, ok := errCodeToError[e]; ok { + return err + } + return ErrUnknown +} + +const ( + errOk = 0 + // System and server-side errors + errSystemError = -1 + errRuntimeInconsistency = -2 + errDataInconsistency = -3 + errConnectionLoss = -4 + errMarshallingError = -5 + errUnimplemented = -6 + errOperationTimeout = -7 + errBadArguments = -8 + errInvalidState = -9 + // API errors + errAPIError ErrCode = -100 + errNoNode ErrCode = -101 // * + errNoAuth ErrCode = -102 + errBadVersion ErrCode = -103 // * + errNoChildrenForEphemerals ErrCode = -108 + errNodeExists ErrCode = -110 // * + errNotEmpty ErrCode = -111 + errSessionExpired ErrCode = -112 + errInvalidCallback ErrCode = -113 + errInvalidAcl ErrCode = -114 + errAuthFailed ErrCode = -115 + errClosing ErrCode = -116 + errNothing ErrCode = -117 + errSessionMoved ErrCode = -118 +) + +// Constants for ACL permissions +const ( + PermRead = 1 << iota + PermWrite + PermCreate + PermDelete + PermAdmin + PermAll = 0x1f +) + +var ( + emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + opNames = map[int32]string{ + opNotify: "notify", + opCreate: "create", + opDelete: "delete", + opExists: "exists", + opGetData: "getData", + opSetData: "setData", + opGetAcl: "getACL", + opSetAcl: "setACL", + opGetChildren: "getChildren", + opSync: "sync", + opPing: "ping", + opGetChildren2: "getChildren2", + opCheck: "check", + opMulti: "multi", + opClose: "close", + opSetAuth: "setAuth", + opSetWatches: "setWatches", + + opWatcherEvent: "watcherEvent", + } +) + +type EventType int32 + +func (t EventType) String() string { + if name := eventNames[t]; name != "" { + return name + } + return "Unknown" +} + +// Mode is used to build custom server modes (leader|follower|standalone). 
+type Mode uint8 + +func (m Mode) String() string { + if name := modeNames[m]; name != "" { + return name + } + return "unknown" +} + +const ( + ModeUnknown Mode = iota + ModeLeader Mode = iota + ModeFollower Mode = iota + ModeStandalone Mode = iota +) + +var ( + modeNames = map[Mode]string{ + ModeLeader: "leader", + ModeFollower: "follower", + ModeStandalone: "standalone", + } +) diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants_test.go b/vendor/github.com/samuel/go-zookeeper/zk/constants_test.go new file mode 100644 index 00000000..9fe6b04c --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/constants_test.go @@ -0,0 +1,24 @@ +package zk + +import ( + "fmt" + "testing" +) + +func TestModeString(t *testing.T) { + if fmt.Sprintf("%v", ModeUnknown) != "unknown" { + t.Errorf("unknown value should be 'unknown'") + } + + if fmt.Sprintf("%v", ModeLeader) != "leader" { + t.Errorf("leader value should be 'leader'") + } + + if fmt.Sprintf("%v", ModeFollower) != "follower" { + t.Errorf("follower value should be 'follower'") + } + + if fmt.Sprintf("%v", ModeStandalone) != "standalone" { + t.Errorf("standlone value should be 'standalone'") + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go b/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go new file mode 100644 index 00000000..f4bba8d0 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go @@ -0,0 +1,88 @@ +package zk + +import ( + "fmt" + "net" + "sync" +) + +// DNSHostProvider is the default HostProvider. It currently matches +// the Java StaticHostProvider, resolving hosts from DNS once during +// the call to Init. It could be easily extended to re-query DNS +// periodically or if there is trouble connecting. +type DNSHostProvider struct { + mu sync.Mutex // Protects everything, so we can add asynchronous updates later. + servers []string + curr int + last int + lookupHost func(string) ([]string, error) // Override of net.LookupHost, for testing. +} + +// Init is called first, with the servers specified in the connection +// string. It uses DNS to look up addresses for each server, then +// shuffles them all together. +func (hp *DNSHostProvider) Init(servers []string) error { + hp.mu.Lock() + defer hp.mu.Unlock() + + lookupHost := hp.lookupHost + if lookupHost == nil { + lookupHost = net.LookupHost + } + + found := []string{} + for _, server := range servers { + host, port, err := net.SplitHostPort(server) + if err != nil { + return err + } + addrs, err := lookupHost(host) + if err != nil { + return err + } + for _, addr := range addrs { + found = append(found, net.JoinHostPort(addr, port)) + } + } + + if len(found) == 0 { + return fmt.Errorf("No hosts found for addresses %q", servers) + } + + // Randomize the order of the servers to avoid creating hotspots + stringShuffle(found) + + hp.servers = found + hp.curr = -1 + hp.last = -1 + + return nil +} + +// Len returns the number of servers available +func (hp *DNSHostProvider) Len() int { + hp.mu.Lock() + defer hp.mu.Unlock() + return len(hp.servers) +} + +// Next returns the next server to connect to. retryStart will be true +// if we've looped through all known servers without Connected() being +// called. 
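+// Next walks the shuffled server list round-robin; retryStart is reported once
+// the walk wraps back to the last server that successfully connected (or to the
+// start of the list if no connection has succeeded yet).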
+func (hp *DNSHostProvider) Next() (server string, retryStart bool) { + hp.mu.Lock() + defer hp.mu.Unlock() + hp.curr = (hp.curr + 1) % len(hp.servers) + retryStart = hp.curr == hp.last + if hp.last == -1 { + hp.last = 0 + } + return hp.servers[hp.curr], retryStart +} + +// Connected notifies the HostProvider of a successful connection. +func (hp *DNSHostProvider) Connected() { + hp.mu.Lock() + defer hp.mu.Unlock() + hp.last = hp.curr +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider_test.go b/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider_test.go new file mode 100644 index 00000000..77a60658 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider_test.go @@ -0,0 +1,224 @@ +package zk + +import ( + "fmt" + "log" + "testing" + "time" +) + +// localhostLookupHost is a test replacement for net.LookupHost that +// always returns 127.0.0.1 +func localhostLookupHost(host string) ([]string, error) { + return []string{"127.0.0.1"}, nil +} + +// TestDNSHostProviderCreate is just like TestCreate, but with an +// overridden HostProvider that ignores the provided hostname. +func TestDNSHostProviderCreate(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + + port := ts.Servers[0].Port + server := fmt.Sprintf("foo.example.com:%d", port) + hostProvider := &DNSHostProvider{lookupHost: localhostLookupHost} + zk, _, err := Connect([]string{server}, time.Second*15, WithHostProvider(hostProvider)) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + path := "/gozk-test" + + if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if p != path { + t.Fatalf("Create returned different path '%s' != '%s'", p, path) + } + if data, stat, err := zk.Get(path); err != nil { + t.Fatalf("Get returned error: %+v", err) + } else if stat == nil { + t.Fatal("Get returned nil stat") + } else if len(data) < 4 { + t.Fatal("Get returned wrong size data") + } +} + +// localHostPortsFacade wraps a HostProvider, remapping the +// address/port combinations it returns to "localhost:$PORT" where +// $PORT is chosen from the provided ports. +type localHostPortsFacade struct { + inner HostProvider // The wrapped HostProvider + ports []int // The provided list of ports + nextPort int // The next port to use + mapped map[string]string // Already-mapped address/port combinations +} + +func newLocalHostPortsFacade(inner HostProvider, ports []int) *localHostPortsFacade { + return &localHostPortsFacade{ + inner: inner, + ports: ports, + mapped: make(map[string]string), + } +} + +func (lhpf *localHostPortsFacade) Len() int { return lhpf.inner.Len() } +func (lhpf *localHostPortsFacade) Connected() { lhpf.inner.Connected() } +func (lhpf *localHostPortsFacade) Init(servers []string) error { return lhpf.inner.Init(servers) } +func (lhpf *localHostPortsFacade) Next() (string, bool) { + server, retryStart := lhpf.inner.Next() + + // If we've already set up a mapping for that server, just return it. 
+ if localMapping := lhpf.mapped[server]; localMapping != "" { + return localMapping, retryStart + } + + if lhpf.nextPort == len(lhpf.ports) { + log.Fatalf("localHostPortsFacade out of ports to assign to %q; current config: %q", server, lhpf.mapped) + } + + localMapping := fmt.Sprintf("localhost:%d", lhpf.ports[lhpf.nextPort]) + lhpf.mapped[server] = localMapping + lhpf.nextPort++ + return localMapping, retryStart +} + +var _ HostProvider = &localHostPortsFacade{} + +// TestDNSHostProviderReconnect tests that the zk.Conn correctly +// reconnects when the Zookeeper instance it's connected to +// restarts. It wraps the DNSHostProvider in a lightweight facade that +// remaps addresses to localhost:$PORT combinations corresponding to +// the test ZooKeeper instances. +func TestDNSHostProviderReconnect(t *testing.T) { + ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + + innerHp := &DNSHostProvider{lookupHost: func(host string) ([]string, error) { + return []string{"192.0.2.1", "192.0.2.2", "192.0.2.3"}, nil + }} + ports := make([]int, 0, len(ts.Servers)) + for _, server := range ts.Servers { + ports = append(ports, server.Port) + } + hp := newLocalHostPortsFacade(innerHp, ports) + + zk, _, err := Connect([]string{"foo.example.com:12345"}, time.Second, WithHostProvider(hp)) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + path := "/gozk-test" + + // Initial operation to force connection. + if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + // Figure out which server we're connected to. + currentServer := zk.Server() + t.Logf("Connected to %q. Finding test server index…", currentServer) + serverIndex := -1 + for i, server := range ts.Servers { + server := fmt.Sprintf("localhost:%d", server.Port) + t.Logf("…trying %q", server) + if currentServer == server { + serverIndex = i + t.Logf("…found at index %d", i) + break + } + } + if serverIndex == -1 { + t.Fatalf("Cannot determine test server index.") + } + + // Restart the connected server. + ts.Servers[serverIndex].Srv.Stop() + ts.Servers[serverIndex].Srv.Start() + + // Continue with the basic TestCreate tests. + if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if p != path { + t.Fatalf("Create returned different path '%s' != '%s'", p, path) + } + if data, stat, err := zk.Get(path); err != nil { + t.Fatalf("Get returned error: %+v", err) + } else if stat == nil { + t.Fatal("Get returned nil stat") + } else if len(data) < 4 { + t.Fatal("Get returned wrong size data") + } + + if zk.Server() == currentServer { + t.Errorf("Still connected to %q after restart.", currentServer) + } +} + +// TestDNSHostProviderRetryStart tests the `retryStart` functionality +// of DNSHostProvider. +// It's also probably the clearest visual explanation of exactly how +// it works. +func TestDNSHostProviderRetryStart(t *testing.T) { + t.Parallel() + + hp := &DNSHostProvider{lookupHost: func(host string) ([]string, error) { + return []string{"192.0.2.1", "192.0.2.2", "192.0.2.3"}, nil + }} + + if err := hp.Init([]string{"foo.example.com:12345"}); err != nil { + t.Fatal(err) + } + + testdata := []struct { + retryStartWant bool + callConnected bool + }{ + // Repeated failures. 
+ {false, false}, + {false, false}, + {false, false}, + {true, false}, + {false, false}, + {false, false}, + {true, true}, + + // One success offsets things. + {false, false}, + {false, true}, + {false, true}, + + // Repeated successes. + {false, true}, + {false, true}, + {false, true}, + {false, true}, + {false, true}, + + // And some more failures. + {false, false}, + {false, false}, + {true, false}, // Looped back to last known good server: all alternates failed. + {false, false}, + } + + for i, td := range testdata { + _, retryStartGot := hp.Next() + if retryStartGot != td.retryStartWant { + t.Errorf("%d: retryStart=%v; want %v", i, retryStartGot, td.retryStartWant) + } + if td.callConnected { + hp.Connected() + } + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw.go b/vendor/github.com/samuel/go-zookeeper/zk/flw.go new file mode 100644 index 00000000..3e97f968 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/flw.go @@ -0,0 +1,266 @@ +package zk + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "regexp" + "strconv" + "strings" + "time" +) + +// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output +// from the zookeeper instances and parses the output. A slice of *ServerStats structs are returned +// as well as a boolean value to indicate whether this function processed successfully. +// +// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil, +// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the +// servers had an issue and the "Error" value in the struct should be inspected to determine +// which server had the issue. +func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) { + // different parts of the regular expression that are required to parse the srvr output + const ( + zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)` + zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)` + zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)` + zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)` + ) + + // build the regex from the pieces above + re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState)) + if err != nil { + return nil, false + } + + imOk := true + servers = FormatServers(servers) + ss := make([]*ServerStats, len(servers)) + + for i := range ss { + response, err := fourLetterWord(servers[i], "srvr", timeout) + + if err != nil { + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + matches := re.FindAllStringSubmatch(string(response), -1) + + if matches == nil { + err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)") + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + match := matches[0][1:] + + // determine current server + var srvrMode Mode + switch match[10] { + case "leader": + srvrMode = ModeLeader + case "follower": + srvrMode = ModeFollower + case "standalone": + srvrMode = ModeStandalone + default: + srvrMode = ModeUnknown + } + + buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1]) + + if err != nil { + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + parsedInt, err := strconv.ParseInt(match[9], 0, 64) + + if err != nil { + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + // the ZxID value is an int64 with two int32s packed inside 
+ // the high int32 is the epoch (i.e., number of leader elections) + // the low int32 is the counter + epoch := int32(parsedInt >> 32) + counter := int32(parsedInt & 0xFFFFFFFF) + + // within the regex above, these values must be numerical + // so we can avoid useless checking of the error return value + minLatency, _ := strconv.ParseInt(match[2], 0, 64) + avgLatency, _ := strconv.ParseInt(match[3], 0, 64) + maxLatency, _ := strconv.ParseInt(match[4], 0, 64) + recv, _ := strconv.ParseInt(match[5], 0, 64) + sent, _ := strconv.ParseInt(match[6], 0, 64) + cons, _ := strconv.ParseInt(match[7], 0, 64) + outs, _ := strconv.ParseInt(match[8], 0, 64) + ncnt, _ := strconv.ParseInt(match[11], 0, 64) + + ss[i] = &ServerStats{ + Sent: sent, + Received: recv, + NodeCount: ncnt, + MinLatency: minLatency, + AvgLatency: avgLatency, + MaxLatency: maxLatency, + Connections: cons, + Outstanding: outs, + Epoch: epoch, + Counter: counter, + BuildTime: buildTime, + Mode: srvrMode, + Version: match[0], + } + } + + return ss, imOk +} + +// FLWRuok is a FourLetterWord helper function. In particular, this function +// pulls the ruok output from each server. +func FLWRuok(servers []string, timeout time.Duration) []bool { + servers = FormatServers(servers) + oks := make([]bool, len(servers)) + + for i := range oks { + response, err := fourLetterWord(servers[i], "ruok", timeout) + + if err != nil { + continue + } + + if bytes.Equal(response[:4], []byte("imok")) { + oks[i] = true + } + } + return oks +} + +// FLWCons is a FourLetterWord helper function. In particular, this function +// pulls the ruok output from each server. +// +// As with FLWSrvr, the boolean value indicates whether one of the requests had +// an issue. The Clients struct has an Error value that can be checked. 
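+//
+// A minimal sketch of calling the four-letter-word helpers (illustrative
+// address and timeout, error handling elided):
+//
+//    stats, statsOK := FLWSrvr([]string{"127.0.0.1:2181"}, 5*time.Second)
+//    cons, consOK := FLWCons([]string{"127.0.0.1:2181"}, 5*time.Second)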
+func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) { + const ( + zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]` + zrPac = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),` + zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)` + ) + + re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh)) + if err != nil { + return nil, false + } + + servers = FormatServers(servers) + sc := make([]*ServerClients, len(servers)) + imOk := true + + for i := range sc { + response, err := fourLetterWord(servers[i], "cons", timeout) + + if err != nil { + sc[i] = &ServerClients{Error: err} + imOk = false + continue + } + + scan := bufio.NewScanner(bytes.NewReader(response)) + + var clients []*ServerClient + + for scan.Scan() { + line := scan.Bytes() + + if len(line) == 0 { + continue + } + + m := re.FindAllStringSubmatch(string(line), -1) + + if m == nil { + err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)") + sc[i] = &ServerClients{Error: err} + imOk = false + continue + } + + match := m[0][1:] + + queued, _ := strconv.ParseInt(match[1], 0, 64) + recvd, _ := strconv.ParseInt(match[2], 0, 64) + sent, _ := strconv.ParseInt(match[3], 0, 64) + sid, _ := strconv.ParseInt(match[4], 0, 64) + est, _ := strconv.ParseInt(match[6], 0, 64) + timeout, _ := strconv.ParseInt(match[7], 0, 32) + lcxid, _ := parseInt64(match[8]) + lzxid, _ := parseInt64(match[9]) + lresp, _ := strconv.ParseInt(match[10], 0, 64) + llat, _ := strconv.ParseInt(match[11], 0, 32) + minlat, _ := strconv.ParseInt(match[12], 0, 32) + avglat, _ := strconv.ParseInt(match[13], 0, 32) + maxlat, _ := strconv.ParseInt(match[14], 0, 32) + + clients = append(clients, &ServerClient{ + Queued: queued, + Received: recvd, + Sent: sent, + SessionID: sid, + Lcxid: int64(lcxid), + Lzxid: int64(lzxid), + Timeout: int32(timeout), + LastLatency: int32(llat), + MinLatency: int32(minlat), + AvgLatency: int32(avglat), + MaxLatency: int32(maxlat), + Established: time.Unix(est, 0), + LastResponse: time.Unix(lresp, 0), + Addr: match[0], + LastOperation: match[5], + }) + } + + sc[i] = &ServerClients{Clients: clients} + } + + return sc, imOk +} + +// parseInt64 is similar to strconv.ParseInt, but it also handles hex values that represent negative numbers +func parseInt64(s string) (int64, error) { + if strings.HasPrefix(s, "0x") { + i, err := strconv.ParseUint(s, 0, 64) + return int64(i), err + } + return strconv.ParseInt(s, 0, 64) +} + +func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) { + conn, err := net.DialTimeout("tcp", server, timeout) + if err != nil { + return nil, err + } + + // the zookeeper server should automatically close this socket + // once the command has been processed, but better safe than sorry + defer conn.Close() + + conn.SetWriteDeadline(time.Now().Add(timeout)) + _, err = conn.Write([]byte(command)) + if err != nil { + return nil, err + } + + conn.SetReadDeadline(time.Now().Add(timeout)) + return ioutil.ReadAll(conn) +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw_test.go b/vendor/github.com/samuel/go-zookeeper/zk/flw_test.go new file mode 100644 index 00000000..5bbabb9b --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/flw_test.go @@ -0,0 +1,330 @@ +package zk + +import ( + "net" + "testing" + "time" +) + +var ( + zkSrvrOut = `Zookeeper 
version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT +Latency min/avg/max: 0/1/10 +Received: 4207 +Sent: 4220 +Connections: 81 +Outstanding: 1 +Zxid: 0x110a7a8f37 +Mode: leader +Node count: 306 +` + zkConsOut = ` /10.42.45.231:45361[1](queued=0,recved=9435,sent=9457,sid=0x94c2989e04716b5,lop=PING,est=1427238717217,to=20001,lcxid=0x55120915,lzxid=0xffffffffffffffff,lresp=1427259255908,llat=0,minlat=0,avglat=1,maxlat=17) + /10.55.33.98:34342[1](queued=0,recved=9338,sent=9350,sid=0x94c2989e0471731,lop=PING,est=1427238849319,to=20001,lcxid=0x55120944,lzxid=0xffffffffffffffff,lresp=1427259252294,llat=0,minlat=0,avglat=1,maxlat=18) + /10.44.145.114:46556[1](queued=0,recved=109253,sent=109617,sid=0x94c2989e0471709,lop=DELE,est=1427238791305,to=20001,lcxid=0x55139618,lzxid=0x110a7b187d,lresp=1427259257423,llat=2,minlat=0,avglat=1,maxlat=23) + +` +) + +func TestFLWRuok(t *testing.T) { + t.Parallel() + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + go tcpServer(l, "") + + oks := FLWRuok([]string{l.Addr().String()}, time.Second*10) + if len(oks) == 0 { + t.Errorf("no values returned") + } + if !oks[0] { + t.Errorf("instance should be marked as OK") + } + + // + // Confirm that it also returns false for dead instances + // + l, err = net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + go tcpServer(l, "dead") + + oks = FLWRuok([]string{l.Addr().String()}, time.Second*10) + if len(oks) == 0 { + t.Errorf("no values returned") + } + if oks[0] { + t.Errorf("instance should be marked as not OK") + } +} + +func TestFLWSrvr(t *testing.T) { + t.Parallel() + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + go tcpServer(l, "") + + statsSlice, ok := FLWSrvr([]string{l.Addr().String()}, time.Second*10) + if !ok { + t.Errorf("failure indicated on 'srvr' parsing") + } + if len(statsSlice) == 0 { + t.Errorf("no *ServerStats instances returned") + } + + stats := statsSlice[0] + + if stats.Error != nil { + t.Fatalf("error seen in stats: %v", err.Error()) + } + + if stats.Sent != 4220 { + t.Errorf("Sent != 4220") + } + + if stats.Received != 4207 { + t.Errorf("Received != 4207") + } + + if stats.NodeCount != 306 { + t.Errorf("NodeCount != 306") + } + + if stats.MinLatency != 0 { + t.Errorf("MinLatency != 0") + } + + if stats.AvgLatency != 1 { + t.Errorf("AvgLatency != 1") + } + + if stats.MaxLatency != 10 { + t.Errorf("MaxLatency != 10") + } + + if stats.Connections != 81 { + t.Errorf("Connection != 81") + } + + if stats.Outstanding != 1 { + t.Errorf("Outstanding != 1") + } + + if stats.Epoch != 17 { + t.Errorf("Epoch != 17") + } + + if stats.Counter != 175804215 { + t.Errorf("Counter != 175804215") + } + + if stats.Mode != ModeLeader { + t.Errorf("Mode != ModeLeader") + } + + if stats.Version != "3.4.6-1569965" { + t.Errorf("Version expected: 3.4.6-1569965") + } +} + +func TestFLWCons(t *testing.T) { + t.Parallel() + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + go tcpServer(l, "") + + clients, ok := FLWCons([]string{l.Addr().String()}, time.Second*10) + if !ok { + t.Errorf("failure indicated on 'cons' parsing") + } + if len(clients) == 0 { + t.Errorf("no *ServerClients instances returned") + } + + results := []*ServerClient{ + { + Queued: 0, + Received: 9435, + Sent: 9457, + SessionID: 669956116721374901, + LastOperation: "PING", + Established: time.Unix(1427238717217, 0), + Timeout: 20001, + Lcxid: 1427245333, + 
Lzxid: -1, + LastResponse: time.Unix(1427259255908, 0), + LastLatency: 0, + MinLatency: 0, + AvgLatency: 1, + MaxLatency: 17, + Addr: "10.42.45.231:45361", + }, + { + Queued: 0, + Received: 9338, + Sent: 9350, + SessionID: 669956116721375025, + LastOperation: "PING", + Established: time.Unix(1427238849319, 0), + Timeout: 20001, + Lcxid: 1427245380, + Lzxid: -1, + LastResponse: time.Unix(1427259252294, 0), + LastLatency: 0, + MinLatency: 0, + AvgLatency: 1, + MaxLatency: 18, + Addr: "10.55.33.98:34342", + }, + { + Queued: 0, + Received: 109253, + Sent: 109617, + SessionID: 669956116721374985, + LastOperation: "DELE", + Established: time.Unix(1427238791305, 0), + Timeout: 20001, + Lcxid: 1427346968, + Lzxid: 73190283389, + LastResponse: time.Unix(1427259257423, 0), + LastLatency: 2, + MinLatency: 0, + AvgLatency: 1, + MaxLatency: 23, + Addr: "10.44.145.114:46556", + }, + } + + for _, z := range clients { + if z.Error != nil { + t.Errorf("error seen: %v", err.Error()) + } + + for i, v := range z.Clients { + c := results[i] + + if v.Error != nil { + t.Errorf("client error seen: %v", err.Error()) + } + + if v.Queued != c.Queued { + t.Errorf("Queued value mismatch (%d/%d)", v.Queued, c.Queued) + } + + if v.Received != c.Received { + t.Errorf("Received value mismatch (%d/%d)", v.Received, c.Received) + } + + if v.Sent != c.Sent { + t.Errorf("Sent value mismatch (%d/%d)", v.Sent, c.Sent) + } + + if v.SessionID != c.SessionID { + t.Errorf("SessionID value mismatch (%d/%d)", v.SessionID, c.SessionID) + } + + if v.LastOperation != c.LastOperation { + t.Errorf("LastOperation value mismatch ('%v'/'%v')", v.LastOperation, c.LastOperation) + } + + if v.Timeout != c.Timeout { + t.Errorf("Timeout value mismatch (%d/%d)", v.Timeout, c.Timeout) + } + + if v.Lcxid != c.Lcxid { + t.Errorf("Lcxid value mismatch (%d/%d)", v.Lcxid, c.Lcxid) + } + + if v.Lzxid != c.Lzxid { + t.Errorf("Lzxid value mismatch (%d/%d)", v.Lzxid, c.Lzxid) + } + + if v.LastLatency != c.LastLatency { + t.Errorf("LastLatency value mismatch (%d/%d)", v.LastLatency, c.LastLatency) + } + + if v.MinLatency != c.MinLatency { + t.Errorf("MinLatency value mismatch (%d/%d)", v.MinLatency, c.MinLatency) + } + + if v.AvgLatency != c.AvgLatency { + t.Errorf("AvgLatency value mismatch (%d/%d)", v.AvgLatency, c.AvgLatency) + } + + if v.MaxLatency != c.MaxLatency { + t.Errorf("MaxLatency value mismatch (%d/%d)", v.MaxLatency, c.MaxLatency) + } + + if v.Addr != c.Addr { + t.Errorf("Addr value mismatch ('%v'/'%v')", v.Addr, c.Addr) + } + + if !c.Established.Equal(v.Established) { + t.Errorf("Established value mismatch (%v/%v)", c.Established, v.Established) + } + + if !c.LastResponse.Equal(v.LastResponse) { + t.Errorf("Established value mismatch (%v/%v)", c.LastResponse, v.LastResponse) + } + } + } +} + +func tcpServer(listener net.Listener, thing string) { + for { + conn, err := listener.Accept() + if err != nil { + return + } + go connHandler(conn, thing) + } +} + +func connHandler(conn net.Conn, thing string) { + defer conn.Close() + + data := make([]byte, 4) + + _, err := conn.Read(data) + if err != nil { + return + } + + switch string(data) { + case "ruok": + switch thing { + case "dead": + return + default: + conn.Write([]byte("imok")) + } + case "srvr": + switch thing { + case "dead": + return + default: + conn.Write([]byte(zkSrvrOut)) + } + case "cons": + switch thing { + case "dead": + return + default: + conn.Write([]byte(zkConsOut)) + } + default: + conn.Write([]byte("This ZooKeeper instance is not currently serving requests.")) + } +} diff 
--git a/vendor/github.com/samuel/go-zookeeper/zk/lock.go b/vendor/github.com/samuel/go-zookeeper/zk/lock.go new file mode 100644 index 00000000..3c35a427 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/lock.go @@ -0,0 +1,150 @@ +package zk + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +var ( + // ErrDeadlock is returned by Lock when trying to lock twice without unlocking first + ErrDeadlock = errors.New("zk: trying to acquire a lock twice") + // ErrNotLocked is returned by Unlock when trying to release a lock that has not first be acquired. + ErrNotLocked = errors.New("zk: not locked") +) + +// Lock is a mutual exclusion lock. +type Lock struct { + c *Conn + path string + acl []ACL + lockPath string + seq int +} + +// NewLock creates a new lock instance using the provided connection, path, and acl. +// The path must be a node that is only used by this lock. A lock instances starts +// unlocked until Lock() is called. +func NewLock(c *Conn, path string, acl []ACL) *Lock { + return &Lock{ + c: c, + path: path, + acl: acl, + } +} + +func parseSeq(path string) (int, error) { + parts := strings.Split(path, "-") + return strconv.Atoi(parts[len(parts)-1]) +} + +// Lock attempts to acquire the lock. It will wait to return until the lock +// is acquired or an error occurs. If this instance already has the lock +// then ErrDeadlock is returned. +func (l *Lock) Lock() error { + if l.lockPath != "" { + return ErrDeadlock + } + + prefix := fmt.Sprintf("%s/lock-", l.path) + + path := "" + var err error + for i := 0; i < 3; i++ { + path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl) + if err == ErrNoNode { + // Create parent node. + parts := strings.Split(l.path, "/") + pth := "" + for _, p := range parts[1:] { + var exists bool + pth += "/" + p + exists, _, err = l.c.Exists(pth) + if err != nil { + return err + } + if exists == true { + continue + } + _, err = l.c.Create(pth, []byte{}, 0, l.acl) + if err != nil && err != ErrNodeExists { + return err + } + } + } else if err == nil { + break + } else { + return err + } + } + if err != nil { + return err + } + + seq, err := parseSeq(path) + if err != nil { + return err + } + + for { + children, _, err := l.c.Children(l.path) + if err != nil { + return err + } + + lowestSeq := seq + prevSeq := -1 + prevSeqPath := "" + for _, p := range children { + s, err := parseSeq(p) + if err != nil { + return err + } + if s < lowestSeq { + lowestSeq = s + } + if s < seq && s > prevSeq { + prevSeq = s + prevSeqPath = p + } + } + + if seq == lowestSeq { + // Acquired the lock + break + } + + // Wait on the node next in line for the lock + _, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath) + if err != nil && err != ErrNoNode { + return err + } else if err != nil && err == ErrNoNode { + // try again + continue + } + + ev := <-ch + if ev.Err != nil { + return ev.Err + } + } + + l.seq = seq + l.lockPath = path + return nil +} + +// Unlock releases an acquired lock. If the lock is not currently acquired by +// this Lock instance than ErrNotLocked is returned. 
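+//
+// Typical usage of the lock (an editor's illustrative sketch, not upstream
+// documentation; it assumes an already-connected *Conn named c):
+//
+//	l := NewLock(c, "/my-app/leader", WorldACL(PermAll))
+//	if err := l.Lock(); err != nil {
+//		// handle the error, e.g. ErrDeadlock when this instance already holds it
+//	}
+//	defer l.Unlock() // returns ErrNotLocked if the lock was never acquired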
+func (l *Lock) Unlock() error { + if l.lockPath == "" { + return ErrNotLocked + } + if err := l.c.Delete(l.lockPath, -1); err != nil { + return err + } + l.lockPath = "" + l.seq = 0 + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/lock_test.go b/vendor/github.com/samuel/go-zookeeper/zk/lock_test.go new file mode 100644 index 00000000..8a3478a3 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/lock_test.go @@ -0,0 +1,94 @@ +package zk + +import ( + "testing" + "time" +) + +func TestLock(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + acls := WorldACL(PermAll) + + l := NewLock(zk, "/test", acls) + if err := l.Lock(); err != nil { + t.Fatal(err) + } + if err := l.Unlock(); err != nil { + t.Fatal(err) + } + + val := make(chan int, 3) + + if err := l.Lock(); err != nil { + t.Fatal(err) + } + + l2 := NewLock(zk, "/test", acls) + go func() { + if err := l2.Lock(); err != nil { + t.Fatal(err) + } + val <- 2 + if err := l2.Unlock(); err != nil { + t.Fatal(err) + } + val <- 3 + }() + time.Sleep(time.Millisecond * 100) + + val <- 1 + if err := l.Unlock(); err != nil { + t.Fatal(err) + } + if x := <-val; x != 1 { + t.Fatalf("Expected 1 instead of %d", x) + } + if x := <-val; x != 2 { + t.Fatalf("Expected 2 instead of %d", x) + } + if x := <-val; x != 3 { + t.Fatalf("Expected 3 instead of %d", x) + } +} + +// This tests creating a lock with a path that's more than 1 node deep (e.g. "/test-multi-level/lock"), +// when a part of that path already exists (i.e. "/test-multi-level" node already exists). +func TestMultiLevelLock(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + acls := WorldACL(PermAll) + path := "/test-multi-level" + if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if p != path { + t.Fatalf("Create returned different path '%s' != '%s'", p, path) + } + l := NewLock(zk, "/test-multi-level/lock", acls) + defer zk.Delete("/test-multi-level", -1) // Clean up what we've created for this test + defer zk.Delete("/test-multi-level/lock", -1) + if err := l.Lock(); err != nil { + t.Fatal(err) + } + if err := l.Unlock(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_help.go b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go new file mode 100644 index 00000000..3663064c --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go @@ -0,0 +1,216 @@ +package zk + +import ( + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +type TestServer struct { + Port int + Path string + Srv *Server +} + +type TestCluster struct { + Path string + Servers []TestServer +} + +func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) { + tmpPath, err := ioutil.TempDir("", "gozk") + if err != nil { + return nil, err + } + success := false + startPort := int(rand.Int31n(6000) + 10000) + cluster := &TestCluster{Path: tmpPath} + defer func() { + if !success { + cluster.Stop() + } + }() + for 
serverN := 0; serverN < size; serverN++ { + srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN)) + if err := os.Mkdir(srvPath, 0700); err != nil { + return nil, err + } + port := startPort + serverN*3 + cfg := ServerConfig{ + ClientPort: port, + DataDir: srvPath, + } + for i := 0; i < size; i++ { + cfg.Servers = append(cfg.Servers, ServerConfigServer{ + ID: i + 1, + Host: "127.0.0.1", + PeerPort: startPort + i*3 + 1, + LeaderElectionPort: startPort + i*3 + 2, + }) + } + cfgPath := filepath.Join(srvPath, "zoo.cfg") + fi, err := os.Create(cfgPath) + if err != nil { + return nil, err + } + err = cfg.Marshall(fi) + fi.Close() + if err != nil { + return nil, err + } + + fi, err = os.Create(filepath.Join(srvPath, "myid")) + if err != nil { + return nil, err + } + _, err = fmt.Fprintf(fi, "%d\n", serverN+1) + fi.Close() + if err != nil { + return nil, err + } + + srv := &Server{ + ConfigPath: cfgPath, + Stdout: stdout, + Stderr: stderr, + } + if err := srv.Start(); err != nil { + return nil, err + } + cluster.Servers = append(cluster.Servers, TestServer{ + Path: srvPath, + Port: cfg.ClientPort, + Srv: srv, + }) + } + if err := cluster.waitForStart(10, time.Second); err != nil { + return nil, err + } + success = true + return cluster, nil +} + +func (tc *TestCluster) Connect(idx int) (*Conn, error) { + zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", tc.Servers[idx].Port)}, time.Second*15) + return zk, err +} + +func (tc *TestCluster) ConnectAll() (*Conn, <-chan Event, error) { + return tc.ConnectAllTimeout(time.Second * 15) +} + +func (tc *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) { + return tc.ConnectWithOptions(sessionTimeout) +} + +func (tc *TestCluster) ConnectWithOptions(sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) { + hosts := make([]string, len(tc.Servers)) + for i, srv := range tc.Servers { + hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port) + } + zk, ch, err := Connect(hosts, sessionTimeout, options...) 
+ return zk, ch, err +} + +func (tc *TestCluster) Stop() error { + for _, srv := range tc.Servers { + srv.Srv.Stop() + } + defer os.RemoveAll(tc.Path) + return tc.waitForStop(5, time.Second) +} + +// waitForStart blocks until the cluster is up +func (tc *TestCluster) waitForStart(maxRetry int, interval time.Duration) error { + // verify that the servers are up with SRVR + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { + serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) + } + + for i := 0; i < maxRetry; i++ { + _, ok := FLWSrvr(serverAddrs, time.Second) + if ok { + return nil + } + time.Sleep(interval) + } + return fmt.Errorf("unable to verify health of servers") +} + +// waitForStop blocks until the cluster is down +func (tc *TestCluster) waitForStop(maxRetry int, interval time.Duration) error { + // verify that the servers are up with RUOK + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { + serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) + } + + var success bool + for i := 0; i < maxRetry && !success; i++ { + success = true + for _, ok := range FLWRuok(serverAddrs, time.Second) { + if ok { + success = false + } + } + if !success { + time.Sleep(interval) + } + } + if !success { + return fmt.Errorf("unable to verify servers are down") + } + return nil +} + +func (tc *TestCluster) StartServer(server string) { + for _, s := range tc.Servers { + if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) { + s.Srv.Start() + return + } + } + panic(fmt.Sprintf("Unknown server: %s", server)) +} + +func (tc *TestCluster) StopServer(server string) { + for _, s := range tc.Servers { + if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) { + s.Srv.Stop() + return + } + } + panic(fmt.Sprintf("Unknown server: %s", server)) +} + +func (tc *TestCluster) StartAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Start(); err != nil { + return fmt.Errorf( + "Failed to start server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} + +func (tc *TestCluster) StopAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Stop(); err != nil { + return fmt.Errorf( + "Failed to stop server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_java.go b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go new file mode 100644 index 00000000..e553ec1d --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go @@ -0,0 +1,136 @@ +package zk + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" +) + +type ErrMissingServerConfigField string + +func (e ErrMissingServerConfigField) Error() string { + return fmt.Sprintf("zk: missing server config field '%s'", string(e)) +} + +const ( + DefaultServerTickTime = 2000 + DefaultServerInitLimit = 10 + DefaultServerSyncLimit = 5 + DefaultServerAutoPurgeSnapRetainCount = 3 + DefaultPeerPort = 2888 + DefaultLeaderElectionPort = 3888 +) + +type ServerConfigServer struct { + ID int + Host string + PeerPort int + LeaderElectionPort int +} + +type ServerConfig struct { + TickTime int // Number of milliseconds of each tick + InitLimit int // Number of ticks that the initial synchronization phase can take + SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement + DataDir string // Direcrory where the snapshot is stored + ClientPort int // Port at which clients will connect + AutoPurgeSnapRetainCount int // 
Number of snapshots to retain in dataDir + AutoPurgePurgeInterval int // Purge task internal in hours (0 to disable auto purge) + Servers []ServerConfigServer +} + +func (sc ServerConfig) Marshall(w io.Writer) error { + if sc.DataDir == "" { + return ErrMissingServerConfigField("dataDir") + } + fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir) + if sc.TickTime <= 0 { + sc.TickTime = DefaultServerTickTime + } + fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime) + if sc.InitLimit <= 0 { + sc.InitLimit = DefaultServerInitLimit + } + fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit) + if sc.SyncLimit <= 0 { + sc.SyncLimit = DefaultServerSyncLimit + } + fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit) + if sc.ClientPort <= 0 { + sc.ClientPort = DefaultPort + } + fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort) + if sc.AutoPurgePurgeInterval > 0 { + if sc.AutoPurgeSnapRetainCount <= 0 { + sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount + } + fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount) + fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval) + } + if len(sc.Servers) > 0 { + for _, srv := range sc.Servers { + if srv.PeerPort <= 0 { + srv.PeerPort = DefaultPeerPort + } + if srv.LeaderElectionPort <= 0 { + srv.LeaderElectionPort = DefaultLeaderElectionPort + } + fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort) + } + } + return nil +} + +var jarSearchPaths = []string{ + "zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "/usr/share/java/zookeeper-*.jar", + "/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar", +} + +func findZookeeperFatJar() string { + var paths []string + zkPath := os.Getenv("ZOOKEEPER_PATH") + if zkPath == "" { + paths = jarSearchPaths + } else { + paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")} + } + for _, path := range paths { + matches, _ := filepath.Glob(path) + // TODO: could sort by version and pick latest + if len(matches) > 0 { + return matches[0] + } + } + return "" +} + +type Server struct { + JarPath string + ConfigPath string + Stdout, Stderr io.Writer + + cmd *exec.Cmd +} + +func (srv *Server) Start() error { + if srv.JarPath == "" { + srv.JarPath = findZookeeperFatJar() + if srv.JarPath == "" { + return fmt.Errorf("zk: unable to find server jar") + } + } + srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath) + srv.cmd.Stdout = srv.Stdout + srv.cmd.Stderr = srv.Stderr + return srv.cmd.Start() +} + +func (srv *Server) Stop() error { + srv.cmd.Process.Signal(os.Kill) + return srv.cmd.Wait() +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/samuel/go-zookeeper/zk/structs.go new file mode 100644 index 00000000..d4af27de --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/structs.go @@ -0,0 +1,609 @@ +package zk + +import ( + "encoding/binary" + "errors" + "log" + "reflect" + "runtime" + "time" +) + +var ( + ErrUnhandledFieldType = errors.New("zk: unhandled field type") + ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct") + ErrShortBuffer = errors.New("zk: buffer too small") +) + +type defaultLogger struct{} + +func (defaultLogger) Printf(format string, a ...interface{}) { + log.Printf(format, a...) 
+} + +type ACL struct { + Perms int32 + Scheme string + ID string +} + +type Stat struct { + Czxid int64 // The zxid of the change that caused this znode to be created. + Mzxid int64 // The zxid of the change that last modified this znode. + Ctime int64 // The time in milliseconds from epoch when this znode was created. + Mtime int64 // The time in milliseconds from epoch when this znode was last modified. + Version int32 // The number of changes to the data of this znode. + Cversion int32 // The number of changes to the children of this znode. + Aversion int32 // The number of changes to the ACL of this znode. + EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero. + DataLength int32 // The length of the data field of this znode. + NumChildren int32 // The number of children of this znode. + Pzxid int64 // last modified children +} + +// ServerClient is the information for a single Zookeeper client and its session. +// This is used to parse/extract the output fo the `cons` command. +type ServerClient struct { + Queued int64 + Received int64 + Sent int64 + SessionID int64 + Lcxid int64 + Lzxid int64 + Timeout int32 + LastLatency int32 + MinLatency int32 + AvgLatency int32 + MaxLatency int32 + Established time.Time + LastResponse time.Time + Addr string + LastOperation string // maybe? + Error error +} + +// ServerClients is a struct for the FLWCons() function. It's used to provide +// the list of Clients. +// +// This is needed because FLWCons() takes multiple servers. +type ServerClients struct { + Clients []*ServerClient + Error error +} + +// ServerStats is the information pulled from the Zookeeper `stat` command. +type ServerStats struct { + Sent int64 + Received int64 + NodeCount int64 + MinLatency int64 + AvgLatency int64 + MaxLatency int64 + Connections int64 + Outstanding int64 + Epoch int32 + Counter int32 + BuildTime time.Time + Mode Mode + Version string + Error error +} + +type requestHeader struct { + Xid int32 + Opcode int32 +} + +type responseHeader struct { + Xid int32 + Zxid int64 + Err ErrCode +} + +type multiHeader struct { + Type int32 + Done bool + Err ErrCode +} + +type auth struct { + Type int32 + Scheme string + Auth []byte +} + +// Generic request structs + +type pathRequest struct { + Path string +} + +type PathVersionRequest struct { + Path string + Version int32 +} + +type pathWatchRequest struct { + Path string + Watch bool +} + +type pathResponse struct { + Path string +} + +type statResponse struct { + Stat Stat +} + +// + +type CheckVersionRequest PathVersionRequest +type closeRequest struct{} +type closeResponse struct{} + +type connectRequest struct { + ProtocolVersion int32 + LastZxidSeen int64 + TimeOut int32 + SessionID int64 + Passwd []byte +} + +type connectResponse struct { + ProtocolVersion int32 + TimeOut int32 + SessionID int64 + Passwd []byte +} + +type CreateRequest struct { + Path string + Data []byte + Acl []ACL + Flags int32 +} + +type createResponse pathResponse +type DeleteRequest PathVersionRequest +type deleteResponse struct{} + +type errorResponse struct { + Err int32 +} + +type existsRequest pathWatchRequest +type existsResponse statResponse +type getAclRequest pathRequest + +type getAclResponse struct { + Acl []ACL + Stat Stat +} + +type getChildrenRequest pathRequest + +type getChildrenResponse struct { + Children []string +} + +type getChildren2Request pathWatchRequest + +type getChildren2Response struct { + Children []string + Stat Stat 
+} + +type getDataRequest pathWatchRequest + +type getDataResponse struct { + Data []byte + Stat Stat +} + +type getMaxChildrenRequest pathRequest + +type getMaxChildrenResponse struct { + Max int32 +} + +type getSaslRequest struct { + Token []byte +} + +type pingRequest struct{} +type pingResponse struct{} + +type setAclRequest struct { + Path string + Acl []ACL + Version int32 +} + +type setAclResponse statResponse + +type SetDataRequest struct { + Path string + Data []byte + Version int32 +} + +type setDataResponse statResponse + +type setMaxChildren struct { + Path string + Max int32 +} + +type setSaslRequest struct { + Token string +} + +type setSaslResponse struct { + Token string +} + +type setWatchesRequest struct { + RelativeZxid int64 + DataWatches []string + ExistWatches []string + ChildWatches []string +} + +type setWatchesResponse struct{} + +type syncRequest pathRequest +type syncResponse pathResponse + +type setAuthRequest auth +type setAuthResponse struct{} + +type multiRequestOp struct { + Header multiHeader + Op interface{} +} +type multiRequest struct { + Ops []multiRequestOp + DoneHeader multiHeader +} +type multiResponseOp struct { + Header multiHeader + String string + Stat *Stat + Err ErrCode +} +type multiResponse struct { + Ops []multiResponseOp + DoneHeader multiHeader +} + +func (r *multiRequest) Encode(buf []byte) (int, error) { + total := 0 + for _, op := range r.Ops { + op.Header.Done = false + n, err := encodePacketValue(buf[total:], reflect.ValueOf(op)) + if err != nil { + return total, err + } + total += n + } + r.DoneHeader.Done = true + n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader)) + if err != nil { + return total, err + } + total += n + + return total, nil +} + +func (r *multiRequest) Decode(buf []byte) (int, error) { + r.Ops = make([]multiRequestOp, 0) + r.DoneHeader = multiHeader{-1, true, -1} + total := 0 + for { + header := &multiHeader{} + n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) + if err != nil { + return total, err + } + total += n + if header.Done { + r.DoneHeader = *header + break + } + + req := requestStructForOp(header.Type) + if req == nil { + return total, ErrAPIError + } + n, err = decodePacketValue(buf[total:], reflect.ValueOf(req)) + if err != nil { + return total, err + } + total += n + r.Ops = append(r.Ops, multiRequestOp{*header, req}) + } + return total, nil +} + +func (r *multiResponse) Decode(buf []byte) (int, error) { + var multiErr error + + r.Ops = make([]multiResponseOp, 0) + r.DoneHeader = multiHeader{-1, true, -1} + total := 0 + for { + header := &multiHeader{} + n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) + if err != nil { + return total, err + } + total += n + if header.Done { + r.DoneHeader = *header + break + } + + res := multiResponseOp{Header: *header} + var w reflect.Value + switch header.Type { + default: + return total, ErrAPIError + case opError: + w = reflect.ValueOf(&res.Err) + case opCreate: + w = reflect.ValueOf(&res.String) + case opSetData: + res.Stat = new(Stat) + w = reflect.ValueOf(res.Stat) + case opCheck, opDelete: + } + if w.IsValid() { + n, err := decodePacketValue(buf[total:], w) + if err != nil { + return total, err + } + total += n + } + r.Ops = append(r.Ops, res) + if multiErr == nil && res.Err != errOk { + // Use the first error as the error returned from Multi(). 
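+			// (editor's note) Later failures are not lost: every op, successful or
+			// not, was already appended to r.Ops above, so callers can inspect each
+			// individual result even though Decode surfaces only this first non-ok
+			// code as its return error.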
+ multiErr = res.Err.toError() + } + } + return total, multiErr +} + +type watcherEvent struct { + Type EventType + State State + Path string +} + +type decoder interface { + Decode(buf []byte) (int, error) +} + +type encoder interface { + Encode(buf []byte) (int, error) +} + +func decodePacket(buf []byte, st interface{}) (n int, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { + err = ErrShortBuffer + } else { + panic(r) + } + } + }() + + v := reflect.ValueOf(st) + if v.Kind() != reflect.Ptr || v.IsNil() { + return 0, ErrPtrExpected + } + return decodePacketValue(buf, v) +} + +func decodePacketValue(buf []byte, v reflect.Value) (int, error) { + rv := v + kind := v.Kind() + if kind == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + kind = v.Kind() + } + + n := 0 + switch kind { + default: + return n, ErrUnhandledFieldType + case reflect.Struct: + if de, ok := rv.Interface().(decoder); ok { + return de.Decode(buf) + } else if de, ok := v.Interface().(decoder); ok { + return de.Decode(buf) + } else { + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + n2, err := decodePacketValue(buf[n:], field) + n += n2 + if err != nil { + return n, err + } + } + } + case reflect.Bool: + v.SetBool(buf[n] != 0) + n++ + case reflect.Int32: + v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4]))) + n += 4 + case reflect.Int64: + v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8]))) + n += 8 + case reflect.String: + ln := int(binary.BigEndian.Uint32(buf[n : n+4])) + v.SetString(string(buf[n+4 : n+4+ln])) + n += 4 + ln + case reflect.Slice: + switch v.Type().Elem().Kind() { + default: + count := int(binary.BigEndian.Uint32(buf[n : n+4])) + n += 4 + values := reflect.MakeSlice(v.Type(), count, count) + v.Set(values) + for i := 0; i < count; i++ { + n2, err := decodePacketValue(buf[n:], values.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + case reflect.Uint8: + ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4]))) + if ln < 0 { + n += 4 + v.SetBytes(nil) + } else { + bytes := make([]byte, ln) + copy(bytes, buf[n+4:n+4+ln]) + v.SetBytes(bytes) + n += 4 + ln + } + } + } + return n, nil +} + +func encodePacket(buf []byte, st interface{}) (n int, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { + err = ErrShortBuffer + } else { + panic(r) + } + } + }() + + v := reflect.ValueOf(st) + if v.Kind() != reflect.Ptr || v.IsNil() { + return 0, ErrPtrExpected + } + return encodePacketValue(buf, v) +} + +func encodePacketValue(buf []byte, v reflect.Value) (int, error) { + rv := v + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + + n := 0 + switch v.Kind() { + default: + return n, ErrUnhandledFieldType + case reflect.Struct: + if en, ok := rv.Interface().(encoder); ok { + return en.Encode(buf) + } else if en, ok := v.Interface().(encoder); ok { + return en.Encode(buf) + } else { + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + n2, err := encodePacketValue(buf[n:], field) + n += n2 + if err != nil { + return n, err + } + } + } + case reflect.Bool: + if v.Bool() { + buf[n] = 1 + } else { + buf[n] = 0 + } + n++ + case reflect.Int32: + binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int())) + n += 4 + case reflect.Int64: + binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int())) + n += 8 + case 
reflect.String: + str := v.String() + binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str))) + copy(buf[n+4:n+4+len(str)], []byte(str)) + n += 4 + len(str) + case reflect.Slice: + switch v.Type().Elem().Kind() { + default: + count := v.Len() + startN := n + n += 4 + for i := 0; i < count; i++ { + n2, err := encodePacketValue(buf[n:], v.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count)) + case reflect.Uint8: + if v.IsNil() { + binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff)) + n += 4 + } else { + bytes := v.Bytes() + binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes))) + copy(buf[n+4:n+4+len(bytes)], bytes) + n += 4 + len(bytes) + } + } + } + return n, nil +} + +func requestStructForOp(op int32) interface{} { + switch op { + case opClose: + return &closeRequest{} + case opCreate: + return &CreateRequest{} + case opDelete: + return &DeleteRequest{} + case opExists: + return &existsRequest{} + case opGetAcl: + return &getAclRequest{} + case opGetChildren: + return &getChildrenRequest{} + case opGetChildren2: + return &getChildren2Request{} + case opGetData: + return &getDataRequest{} + case opPing: + return &pingRequest{} + case opSetAcl: + return &setAclRequest{} + case opSetData: + return &SetDataRequest{} + case opSetWatches: + return &setWatchesRequest{} + case opSync: + return &syncRequest{} + case opSetAuth: + return &setAuthRequest{} + case opCheck: + return &CheckVersionRequest{} + case opMulti: + return &multiRequest{} + } + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs_test.go b/vendor/github.com/samuel/go-zookeeper/zk/structs_test.go new file mode 100644 index 00000000..a3f27974 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/structs_test.go @@ -0,0 +1,83 @@ +package zk + +import ( + "reflect" + "testing" +) + +func TestEncodeDecodePacket(t *testing.T) { + t.Parallel() + encodeDecodeTest(t, &requestHeader{-2, 5}) + encodeDecodeTest(t, &connectResponse{1, 2, 3, nil}) + encodeDecodeTest(t, &connectResponse{1, 2, 3, []byte{4, 5, 6}}) + encodeDecodeTest(t, &getAclResponse{[]ACL{{12, "s", "anyone"}}, Stat{}}) + encodeDecodeTest(t, &getChildrenResponse{[]string{"foo", "bar"}}) + encodeDecodeTest(t, &pathWatchRequest{"path", true}) + encodeDecodeTest(t, &pathWatchRequest{"path", false}) + encodeDecodeTest(t, &CheckVersionRequest{"/", -1}) + encodeDecodeTest(t, &multiRequest{Ops: []multiRequestOp{{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}}}}) +} + +func TestRequestStructForOp(t *testing.T) { + for op, name := range opNames { + if op != opNotify && op != opWatcherEvent { + if s := requestStructForOp(op); s == nil { + t.Errorf("No struct for op %s", name) + } + } + } +} + +func encodeDecodeTest(t *testing.T, r interface{}) { + buf := make([]byte, 1024) + n, err := encodePacket(buf, r) + if err != nil { + t.Errorf("encodePacket returned non-nil error %+v\n", err) + return + } + t.Logf("%+v %x", r, buf[:n]) + r2 := reflect.New(reflect.ValueOf(r).Elem().Type()).Interface() + n2, err := decodePacket(buf[:n], r2) + if err != nil { + t.Errorf("decodePacket returned non-nil error %+v\n", err) + return + } + if n != n2 { + t.Errorf("sizes don't match: %d != %d", n, n2) + return + } + if !reflect.DeepEqual(r, r2) { + t.Errorf("results don't match: %+v != %+v", r, r2) + return + } +} + +func TestEncodeShortBuffer(t *testing.T) { + t.Parallel() + _, err := encodePacket([]byte{}, &requestHeader{1, 2}) + if err != ErrShortBuffer { + 
t.Errorf("encodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err) + return + } +} + +func TestDecodeShortBuffer(t *testing.T) { + t.Parallel() + _, err := decodePacket([]byte{}, &responseHeader{}) + if err != ErrShortBuffer { + t.Errorf("decodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err) + return + } +} + +func BenchmarkEncode(b *testing.B) { + buf := make([]byte, 4096) + st := &connectRequest{Passwd: []byte("1234567890")} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := encodePacket(buf, st); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/throttle_test.go b/vendor/github.com/samuel/go-zookeeper/zk/throttle_test.go new file mode 100644 index 00000000..633ce05f --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/throttle_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Vendored from go4.org/net/throttle + +package zk + +import ( + "fmt" + "net" + "sync" + "time" +) + +const unitSize = 1400 // read/write chunk size. ~MTU size. + +type Rate struct { + KBps int // or 0, to not rate-limit bandwidth + Latency time.Duration +} + +// byteTime returns the time required for n bytes. 
+func (r Rate) byteTime(n int) time.Duration { + if r.KBps == 0 { + return 0 + } + return time.Duration(float64(n)/1024/float64(r.KBps)) * time.Second +} + +type Listener struct { + net.Listener + Down Rate // server Writes to Client + Up Rate // server Reads from client +} + +func (ln *Listener) Accept() (net.Conn, error) { + c, err := ln.Listener.Accept() + time.Sleep(ln.Up.Latency) + if err != nil { + return nil, err + } + tc := &conn{Conn: c, Down: ln.Down, Up: ln.Up} + tc.start() + return tc, nil +} + +type nErr struct { + n int + err error +} + +type writeReq struct { + writeAt time.Time + p []byte + resc chan nErr +} + +type conn struct { + net.Conn + Down Rate // for reads + Up Rate // for writes + + wchan chan writeReq + closeOnce sync.Once + closeErr error +} + +func (c *conn) start() { + c.wchan = make(chan writeReq, 1024) + go c.writeLoop() +} + +func (c *conn) writeLoop() { + for req := range c.wchan { + time.Sleep(req.writeAt.Sub(time.Now())) + var res nErr + for len(req.p) > 0 && res.err == nil { + writep := req.p + if len(writep) > unitSize { + writep = writep[:unitSize] + } + n, err := c.Conn.Write(writep) + time.Sleep(c.Up.byteTime(len(writep))) + res.n += n + res.err = err + req.p = req.p[n:] + } + req.resc <- res + } +} + +func (c *conn) Close() error { + c.closeOnce.Do(func() { + err := c.Conn.Close() + close(c.wchan) + c.closeErr = err + }) + return c.closeErr +} + +func (c *conn) Write(p []byte) (n int, err error) { + defer func() { + if e := recover(); e != nil { + n = 0 + err = fmt.Errorf("%v", err) + return + } + }() + resc := make(chan nErr, 1) + c.wchan <- writeReq{time.Now().Add(c.Up.Latency), p, resc} + res := <-resc + return res.n, res.err +} + +func (c *conn) Read(p []byte) (n int, err error) { + const max = 1024 + if len(p) > max { + p = p[:max] + } + n, err = c.Conn.Read(p) + time.Sleep(c.Down.byteTime(n)) + return +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/util.go b/vendor/github.com/samuel/go-zookeeper/zk/util.go new file mode 100644 index 00000000..f40a5b15 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/util.go @@ -0,0 +1,116 @@ +package zk + +import ( + "crypto/sha1" + "encoding/base64" + "fmt" + "math/rand" + "strconv" + "strings" + "unicode/utf8" +) + +// AuthACL produces an ACL list containing a single ACL which uses the +// provided permissions, with the scheme "auth", and ID "", which is used +// by ZooKeeper to represent any authenticated user. +func AuthACL(perms int32) []ACL { + return []ACL{{perms, "auth", ""}} +} + +// WorldACL produces an ACL list containing a single ACL which uses the +// provided permissions, with the scheme "world", and ID "anyone", which +// is used by ZooKeeper to represent any user at all. +func WorldACL(perms int32) []ACL { + return []ACL{{perms, "world", "anyone"}} +} + +func DigestACL(perms int32, user, password string) []ACL { + userPass := []byte(fmt.Sprintf("%s:%s", user, password)) + h := sha1.New() + if n, err := h.Write(userPass); err != nil || n != len(userPass) { + panic("SHA1 failed") + } + digest := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}} +} + +// FormatServers takes a slice of addresses, and makes sure they are in a format +// that resembles :. If the server has no port provided, the +// DefaultPort constant is added to the end. 
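+//
+// For example (editor's illustration, consistent with the expectations in
+// util_test.go below): {"10.0.0.1", "10.0.0.2:2182"} becomes
+// {"10.0.0.1:2181", "10.0.0.2:2182"}, DefaultPort being 2181.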
+func FormatServers(servers []string) []string { + for i := range servers { + if !strings.Contains(servers[i], ":") { + servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort) + } + } + return servers +} + +// stringShuffle performs a Fisher-Yates shuffle on a slice of strings +func stringShuffle(s []string) { + for i := len(s) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + s[i], s[j] = s[j], s[i] + } +} + +// validatePath will make sure a path is valid before sending the request +func validatePath(path string, isSequential bool) error { + if path == "" { + return ErrInvalidPath + } + + if path[0] != '/' { + return ErrInvalidPath + } + + n := len(path) + if n == 1 { + // path is just the root + return nil + } + + if !isSequential && path[n-1] == '/' { + return ErrInvalidPath + } + + // Start at rune 1 since we already know that the first character is + // a '/'. + for i, w := 1, 0; i < n; i += w { + r, width := utf8.DecodeRuneInString(path[i:]) + switch { + case r == '\u0000': + return ErrInvalidPath + case r == '/': + last, _ := utf8.DecodeLastRuneInString(path[:i]) + if last == '/' { + return ErrInvalidPath + } + case r == '.': + last, lastWidth := utf8.DecodeLastRuneInString(path[:i]) + + // Check for double dot + if last == '.' { + last, _ = utf8.DecodeLastRuneInString(path[:i-lastWidth]) + } + + if last == '/' { + if i+1 == n { + return ErrInvalidPath + } + + next, _ := utf8.DecodeRuneInString(path[i+w:]) + if next == '/' { + return ErrInvalidPath + } + } + case r >= '\u0000' && r <= '\u001f', + r >= '\u007f' && r <= '\u009f', + r >= '\uf000' && r <= '\uf8ff', + r >= '\ufff0' && r < '\uffff': + return ErrInvalidPath + } + w = width + } + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/util_test.go b/vendor/github.com/samuel/go-zookeeper/zk/util_test.go new file mode 100644 index 00000000..53a59505 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/util_test.go @@ -0,0 +1,53 @@ +package zk + +import "testing" + +func TestFormatServers(t *testing.T) { + t.Parallel() + servers := []string{"127.0.0.1:2181", "127.0.0.42", "127.0.42.1:8811"} + r := []string{"127.0.0.1:2181", "127.0.0.42:2181", "127.0.42.1:8811"} + for i, s := range FormatServers(servers) { + if s != r[i] { + t.Errorf("%v should equal %v", s, r[i]) + } + } +} + +func TestValidatePath(t *testing.T) { + tt := []struct { + path string + seq bool + valid bool + }{ + {"/this is / a valid/path", false, true}, + {"/", false, true}, + {"", false, false}, + {"not/valid", false, false}, + {"/ends/with/slash/", false, false}, + {"/sequential/", true, true}, + {"/test\u0000", false, false}, + {"/double//slash", false, false}, + {"/single/./period", false, false}, + {"/double/../period", false, false}, + {"/double/..ok/period", false, true}, + {"/double/alsook../period", false, true}, + {"/double/period/at/end/..", false, false}, + {"/name/with.period", false, true}, + {"/test\u0001", false, false}, + {"/test\u001f", false, false}, + {"/test\u0020", false, true}, // first allowable + {"/test\u007e", false, true}, // last valid ascii + {"/test\u007f", false, false}, + {"/test\u009f", false, false}, + {"/test\uf8ff", false, false}, + {"/test\uffef", false, true}, + {"/test\ufff0", false, false}, + } + + for _, tc := range tt { + err := validatePath(tc.path, tc.seq) + if (err != nil) == tc.valid { + t.Errorf("failed to validate path %q", tc.path) + } + } +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/zk_test.go b/vendor/github.com/samuel/go-zookeeper/zk/zk_test.go new file mode 100644 index 
00000000..c81ef9fb --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/zk_test.go @@ -0,0 +1,939 @@ +package zk + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "net" + "reflect" + "regexp" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestStateChanges(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + + callbackChan := make(chan Event) + f := func(event Event) { + callbackChan <- event + } + + zk, eventChan, err := ts.ConnectWithOptions(15*time.Second, WithEventCallback(f)) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + + verifyEventOrder := func(c <-chan Event, expectedStates []State, source string) { + for _, state := range expectedStates { + for { + event, ok := <-c + if !ok { + t.Fatalf("unexpected channel close for %s", source) + } + + if event.Type != EventSession { + continue + } + + if event.State != state { + t.Fatalf("mismatched state order from %s, expected %v, received %v", source, state, event.State) + } + break + } + } + } + + states := []State{StateConnecting, StateConnected, StateHasSession} + verifyEventOrder(callbackChan, states, "callback") + verifyEventOrder(eventChan, states, "event channel") + + zk.Close() + verifyEventOrder(callbackChan, []State{StateDisconnected}, "callback") + verifyEventOrder(eventChan, []State{StateDisconnected}, "event channel") +} + +func TestCreate(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + path := "/gozk-test" + + if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if p != path { + t.Fatalf("Create returned different path '%s' != '%s'", p, path) + } + if data, stat, err := zk.Get(path); err != nil { + t.Fatalf("Get returned error: %+v", err) + } else if stat == nil { + t.Fatal("Get returned nil stat") + } else if len(data) < 4 { + t.Fatal("Get returned wrong size data") + } +} + +func TestMulti(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + path := "/gozk-test" + + if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + ops := []interface{}{ + &CreateRequest{Path: path, Data: []byte{1, 2, 3, 4}, Acl: WorldACL(PermAll)}, + &SetDataRequest{Path: path, Data: []byte{1, 2, 3, 4}, Version: -1}, + } + if res, err := zk.Multi(ops...); err != nil { + t.Fatalf("Multi returned error: %+v", err) + } else if len(res) != 2 { + t.Fatalf("Expected 2 responses got %d", len(res)) + } else { + t.Logf("%+v", res) + } + if data, stat, err := zk.Get(path); err != nil { + t.Fatalf("Get returned error: %+v", err) + } else if stat == nil { + t.Fatal("Get returned nil stat") + } else if len(data) < 4 { + t.Fatal("Get returned wrong size data") + } +} + +func TestIfAuthdataSurvivesReconnect(t *testing.T) { + // This test case ensures authentication data is being resubmited after + // reconnect. 
+ testNode := "/auth-testnode" + + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + acl := DigestACL(PermAll, "userfoo", "passbar") + + _, err = zk.Create(testNode, []byte("Some very secret content"), 0, acl) + if err != nil && err != ErrNodeExists { + t.Fatalf("Failed to create test node : %+v", err) + } + + _, _, err = zk.Get(testNode) + if err == nil || err != ErrNoAuth { + var msg string + + if err == nil { + msg = "Fetching data without auth should have resulted in an error" + } else { + msg = fmt.Sprintf("Expecting ErrNoAuth, got `%+v` instead", err) + } + t.Fatalf(msg) + } + + zk.AddAuth("digest", []byte("userfoo:passbar")) + + _, _, err = zk.Get(testNode) + if err != nil { + t.Fatalf("Fetching data with auth failed: %+v", err) + } + + ts.StopAllServers() + ts.StartAllServers() + + _, _, err = zk.Get(testNode) + if err != nil { + t.Fatalf("Fetching data after reconnect failed: %+v", err) + } +} + +func TestMultiFailures(t *testing.T) { + // This test case ensures that we return the errors associated with each + // opeThis in the event a call to Multi() fails. + const firstPath = "/gozk-test-first" + const secondPath = "/gozk-test-second" + + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + // Ensure firstPath doesn't exist and secondPath does. This will cause the + // 2nd operation in the Multi() to fail. + if err := zk.Delete(firstPath, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + if _, err := zk.Create(secondPath, nil /* data */, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } + + ops := []interface{}{ + &CreateRequest{Path: firstPath, Data: []byte{1, 2}, Acl: WorldACL(PermAll)}, + &CreateRequest{Path: secondPath, Data: []byte{3, 4}, Acl: WorldACL(PermAll)}, + } + res, err := zk.Multi(ops...) 
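+	// (editor's note) ZooKeeper multi requests are atomic: because the second
+	// create below fails with ErrNodeExists, none of the ops take effect, which
+	// the ErrNoNode check on firstPath at the end of this test relies on.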
+ if err != ErrNodeExists { + t.Fatalf("Multi() didn't return correct error: %+v", err) + } + if len(res) != 2 { + t.Fatalf("Expected 2 responses received %d", len(res)) + } + if res[0].Error != nil { + t.Fatalf("First operation returned an unexpected error %+v", res[0].Error) + } + if res[1].Error != ErrNodeExists { + t.Fatalf("Second operation returned incorrect error %+v", res[1].Error) + } + if _, _, err := zk.Get(firstPath); err != ErrNoNode { + t.Fatalf("Node %s was incorrectly created: %+v", firstPath, err) + } +} + +func TestGetSetACL(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + if err := zk.AddAuth("digest", []byte("blah")); err != nil { + t.Fatalf("AddAuth returned error %+v", err) + } + + path := "/gozk-test" + + if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + if path, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if path != "/gozk-test" { + t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) + } + + expected := WorldACL(PermAll) + + if acl, stat, err := zk.GetACL(path); err != nil { + t.Fatalf("GetACL returned error %+v", err) + } else if stat == nil { + t.Fatalf("GetACL returned nil Stat") + } else if len(acl) != 1 || expected[0] != acl[0] { + t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl) + } + + expected = []ACL{{PermAll, "ip", "127.0.0.1"}} + + if stat, err := zk.SetACL(path, expected, -1); err != nil { + t.Fatalf("SetACL returned error %+v", err) + } else if stat == nil { + t.Fatalf("SetACL returned nil Stat") + } + + if acl, stat, err := zk.GetACL(path); err != nil { + t.Fatalf("GetACL returned error %+v", err) + } else if stat == nil { + t.Fatalf("GetACL returned nil Stat") + } else if len(acl) != 1 || expected[0] != acl[0] { + t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl) + } +} + +func TestAuth(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + path := "/gozk-digest-test" + if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + acl := DigestACL(PermAll, "user", "password") + + if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, acl); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if p != path { + t.Fatalf("Create returned different path '%s' != '%s'", p, path) + } + + if a, stat, err := zk.GetACL(path); err != nil { + t.Fatalf("GetACL returned error %+v", err) + } else if stat == nil { + t.Fatalf("GetACL returned nil Stat") + } else if len(a) != 1 || acl[0] != a[0] { + t.Fatalf("GetACL mismatch expected %+v instead of %+v", acl, a) + } + + if _, _, err := zk.Get(path); err != ErrNoAuth { + t.Fatalf("Get returned error %+v instead of ErrNoAuth", err) + } + + if err := zk.AddAuth("digest", []byte("user:password")); err != nil { + t.Fatalf("AddAuth returned error %+v", err) + } + + if data, stat, err := zk.Get(path); err != nil { + t.Fatalf("Get returned error %+v", err) + } else if stat == nil { + t.Fatalf("Get returned nil Stat") + } else if 
len(data) != 4 { + t.Fatalf("Get returned wrong data length") + } +} + +func TestChildren(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + deleteNode := func(node string) { + if err := zk.Delete(node, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + } + + deleteNode("/gozk-test-big") + + if path, err := zk.Create("/gozk-test-big", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if path != "/gozk-test-big" { + t.Fatalf("Create returned different path '%s' != '/gozk-test-big'", path) + } + + rb := make([]byte, 1000) + hb := make([]byte, 2000) + prefix := []byte("/gozk-test-big/") + for i := 0; i < 10000; i++ { + _, err := rand.Read(rb) + if err != nil { + t.Fatal("Cannot create random znode name") + } + hex.Encode(hb, rb) + + expect := string(append(prefix, hb...)) + if path, err := zk.Create(expect, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if path != expect { + t.Fatalf("Create returned different path '%s' != '%s'", path, expect) + } + defer deleteNode(string(expect)) + } + + children, _, err := zk.Children("/gozk-test-big") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } else if len(children) != 10000 { + t.Fatal("Children returned wrong number of nodes") + } +} + +func TestChildWatch(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + children, stat, childCh, err := zk.ChildrenW("/") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } else if stat == nil { + t.Fatal("Children returned nil stat") + } else if len(children) < 1 { + t.Fatal("Children should return at least 1 child") + } + + if path, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if path != "/gozk-test" { + t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) + } + + select { + case ev := <-childCh: + if ev.Err != nil { + t.Fatalf("Child watcher error %+v", ev.Err) + } + if ev.Path != "/" { + t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") + } + case _ = <-time.After(time.Second * 2): + t.Fatal("Child watcher timed out") + } + + // Delete of the watched node should trigger the watch + + children, stat, childCh, err = zk.ChildrenW("/gozk-test") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } else if stat == nil { + t.Fatal("Children returned nil stat") + } else if len(children) != 0 { + t.Fatal("Children should return 0 children") + } + + if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + select { + case ev := <-childCh: + if ev.Err != nil { + t.Fatalf("Child watcher error %+v", ev.Err) + } + if ev.Path != "/gozk-test" { + t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") + } + case _ = <-time.After(time.Second * 2): + 
t.Fatal("Child watcher timed out") + } +} + +func TestSetWatchers(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + zk.reconnectLatch = make(chan struct{}) + zk.setWatchLimit = 1024 // break up set-watch step into 1k requests + var setWatchReqs atomic.Value + zk.setWatchCallback = func(reqs []*setWatchesRequest) { + setWatchReqs.Store(reqs) + } + + zk2, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk2.Close() + + if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + testPaths := map[string]<-chan Event{} + defer func() { + // clean up all of the test paths we create + for p := range testPaths { + zk2.Delete(p, -1) + } + }() + + // we create lots of paths to watch, to make sure a "set watches" request + // on re-create will be too big and be required to span multiple packets + for i := 0; i < 1000; i++ { + testPath, err := zk.Create(fmt.Sprintf("/gozk-test-%d", i), []byte{}, 0, WorldACL(PermAll)) + if err != nil { + t.Fatalf("Create returned: %+v", err) + } + testPaths[testPath] = nil + _, _, testEvCh, err := zk.GetW(testPath) + if err != nil { + t.Fatalf("GetW returned: %+v", err) + } + testPaths[testPath] = testEvCh + } + + children, stat, childCh, err := zk.ChildrenW("/") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } else if stat == nil { + t.Fatal("Children returned nil stat") + } else if len(children) < 1 { + t.Fatal("Children should return at least 1 child") + } + + // Simulate network error by brutally closing the network connection. + zk.conn.Close() + for p := range testPaths { + if err := zk2.Delete(p, -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + } + if path, err := zk2.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatalf("Create returned error: %+v", err) + } else if path != "/gozk-test" { + t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) + } + + time.Sleep(100 * time.Millisecond) + + // zk should still be waiting to reconnect, so none of the watches should have been triggered + for p, ch := range testPaths { + select { + case <-ch: + t.Fatalf("GetW watcher for %q should not have triggered yet", p) + default: + } + } + select { + case <-childCh: + t.Fatalf("ChildrenW watcher should not have triggered yet") + default: + } + + // now we let the reconnect occur and make sure it resets watches + close(zk.reconnectLatch) + + for p, ch := range testPaths { + select { + case ev := <-ch: + if ev.Err != nil { + t.Fatalf("GetW watcher error %+v", ev.Err) + } + if ev.Path != p { + t.Fatalf("GetW watcher wrong path %s instead of %s", ev.Path, p) + } + case <-time.After(2 * time.Second): + t.Fatal("GetW watcher timed out") + } + } + + select { + case ev := <-childCh: + if ev.Err != nil { + t.Fatalf("Child watcher error %+v", ev.Err) + } + if ev.Path != "/" { + t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") + } + case <-time.After(2 * time.Second): + t.Fatal("Child watcher timed out") + } + + // Yay! All watches fired correctly. Now we also inspect the actual set-watch request objects + // to ensure they didn't exceed the expected packet set. 
+ buf := make([]byte, bufferSize) + totalWatches := 0 + actualReqs := setWatchReqs.Load().([]*setWatchesRequest) + if len(actualReqs) < 12 { + // sanity check: we should have generated *at least* 12 requests to reset watches + t.Fatalf("too few setWatchesRequest messages: %d", len(actualReqs)) + } + for _, r := range actualReqs { + totalWatches += len(r.ChildWatches) + len(r.DataWatches) + len(r.ExistWatches) + n, err := encodePacket(buf, r) + if err != nil { + t.Fatalf("encodePacket failed: %v! request:\n%+v", err, r) + } else if n > 1024 { + t.Fatalf("setWatchesRequest exceeded allowed size (%d > 1024)! request:\n%+v", n, r) + } + } + + if totalWatches != len(testPaths)+1 { + t.Fatalf("setWatchesRequests did not include all expected watches; expecting %d, got %d", len(testPaths)+1, totalWatches) + } +} + +func TestExpiringWatch(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + zk, _, err := ts.ConnectAll() + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + + if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { + t.Fatalf("Delete returned error: %+v", err) + } + + children, stat, childCh, err := zk.ChildrenW("/") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } else if stat == nil { + t.Fatal("Children returned nil stat") + } else if len(children) < 1 { + t.Fatal("Children should return at least 1 child") + } + + zk.sessionID = 99999 + zk.conn.Close() + + select { + case ev := <-childCh: + if ev.Err != ErrSessionExpired { + t.Fatalf("Child watcher error %+v instead of expected ErrSessionExpired", ev.Err) + } + if ev.Path != "/" { + t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") + } + case <-time.After(2 * time.Second): + t.Fatal("Child watcher timed out") + } +} + +func TestRequestFail(t *testing.T) { + // If connecting fails to all servers in the list then pending requests + // should be errored out so they don't hang forever. 
+ + zk, _, err := Connect([]string{"127.0.0.1:32444"}, time.Second*15) + if err != nil { + t.Fatal(err) + } + defer zk.Close() + + ch := make(chan error) + go func() { + _, _, err := zk.Get("/blah") + ch <- err + }() + select { + case err := <-ch: + if err == nil { + t.Fatal("Expected non-nil error on failed request due to connection failure") + } + case <-time.After(time.Second * 2): + t.Fatal("Get hung when connection could not be made") + } +} + +func TestSlowServer(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + + realAddr := fmt.Sprintf("127.0.0.1:%d", ts.Servers[0].Port) + proxyAddr, stopCh, err := startSlowProxy(t, + Rate{}, Rate{}, + realAddr, func(ln *Listener) { + if ln.Up.Latency == 0 { + ln.Up.Latency = time.Millisecond * 2000 + ln.Down.Latency = time.Millisecond * 2000 + } else { + ln.Up.Latency = 0 + ln.Down.Latency = 0 + } + }) + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + zk, _, err := Connect([]string{proxyAddr}, time.Millisecond*500) + if err != nil { + t.Fatal(err) + } + defer zk.Close() + + _, _, wch, err := zk.ChildrenW("/") + if err != nil { + t.Fatal(err) + } + + // Force a reconnect to get a throttled connection + zk.conn.Close() + + time.Sleep(time.Millisecond * 100) + + if err := zk.Delete("/gozk-test", -1); err == nil { + t.Fatal("Delete should have failed") + } + + // The previous request should have timed out causing the server to be disconnected and reconnected + + if _, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { + t.Fatal(err) + } + + // Make sure event is still returned because the session should not have been affected + select { + case ev := <-wch: + t.Logf("Received event: %+v", ev) + case <-time.After(time.Second): + t.Fatal("Expected to receive a watch event") + } +} + +func startSlowProxy(t *testing.T, up, down Rate, upstream string, adj func(ln *Listener)) (string, chan bool, error) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return "", nil, err + } + tln := &Listener{ + Listener: ln, + Up: up, + Down: down, + } + stopCh := make(chan bool) + go func() { + <-stopCh + tln.Close() + }() + go func() { + for { + cn, err := tln.Accept() + if err != nil { + if !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("Accept failed: %s", err.Error()) + } + return + } + if adj != nil { + adj(tln) + } + go func(cn net.Conn) { + defer cn.Close() + upcn, err := net.Dial("tcp", upstream) + if err != nil { + t.Log(err) + return + } + // This will leave hanging goroutines until stopCh is closed + // but it doesn't matter in the context of running tests.
+ go func() { + <-stopCh + upcn.Close() + }() + go func() { + if _, err := io.Copy(upcn, cn); err != nil { + if !strings.Contains(err.Error(), "use of closed network connection") { + // log.Printf("Upstream write failed: %s", err.Error()) + } + } + }() + if _, err := io.Copy(cn, upcn); err != nil { + if !strings.Contains(err.Error(), "use of closed network connection") { + // log.Printf("Upstream read failed: %s", err.Error()) + } + } + }(cn) + } + }() + return ln.Addr().String(), stopCh, nil +} + +func TestMaxBufferSize(t *testing.T) { + ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) + if err != nil { + t.Fatal(err) + } + defer ts.Stop() + // no buffer size + zk, _, err := ts.ConnectWithOptions(15 * time.Second) + var l testLogger + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zk.Close() + // 1k buffer size, logs to custom test logger + zkLimited, _, err := ts.ConnectWithOptions(15*time.Second, WithMaxBufferSize(1024), func(conn *Conn) { + conn.SetLogger(&l) + }) + if err != nil { + t.Fatalf("Connect returned error: %+v", err) + } + defer zkLimited.Close() + + // With small node with small number of children + data := []byte{101, 102, 103, 103} + _, err = zk.Create("/foo", data, 0, WorldACL(PermAll)) + if err != nil { + t.Fatalf("Create returned error: %+v", err) + } + var children []string + for i := 0; i < 4; i++ { + childName, err := zk.Create("/foo/child", nil, FlagEphemeral|FlagSequence, WorldACL(PermAll)) + if err != nil { + t.Fatalf("Create returned error: %+v", err) + } + children = append(children, childName[len("/foo/"):]) // strip parent prefix from name + } + sort.Strings(children) + + // Limited client works fine + resultData, _, err := zkLimited.Get("/foo") + if err != nil { + t.Fatalf("Get returned error: %+v", err) + } + if !reflect.DeepEqual(resultData, data) { + t.Fatalf("Get returned unexpected data; expecting %+v, got %+v", data, resultData) + } + resultChildren, _, err := zkLimited.Children("/foo") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } + sort.Strings(resultChildren) + if !reflect.DeepEqual(resultChildren, children) { + t.Fatalf("Children returned unexpected names; expecting %+v, got %+v", children, resultChildren) + } + + // With large node though... + data = make([]byte, 1024) + for i := 0; i < 1024; i++ { + data[i] = byte(i) + } + _, err = zk.Create("/bar", data, 0, WorldACL(PermAll)) + if err != nil { + t.Fatalf("Create returned error: %+v", err) + } + _, _, err = zkLimited.Get("/bar") + // NB: Sadly, without actually de-serializing the too-large response packet, we can't send the + // right error to the corresponding outstanding request. So the request just sees ErrConnectionClosed + // while the log will see the actual reason the connection was closed. + expectErr(t, err, ErrConnectionClosed) + expectLogMessage(t, &l, "received packet from server with length .*, which exceeds max buffer size 1024") + + // Or with large number of children... 
+ totalLen := 0 + children = nil + for totalLen < 1024 { + childName, err := zk.Create("/bar/child", nil, FlagEphemeral|FlagSequence, WorldACL(PermAll)) + if err != nil { + t.Fatalf("Create returned error: %+v", err) + } + n := childName[len("/bar/"):] // strip parent prefix from name + children = append(children, n) + totalLen += len(n) + } + sort.Strings(children) + _, _, err = zkLimited.Children("/bar") + expectErr(t, err, ErrConnectionClosed) + expectLogMessage(t, &l, "received packet from server with length .*, which exceeds max buffer size 1024") + + // Other client (without buffer size limit) can successfully query the node and its children, of course + resultData, _, err = zk.Get("/bar") + if err != nil { + t.Fatalf("Get returned error: %+v", err) + } + if !reflect.DeepEqual(resultData, data) { + t.Fatalf("Get returned unexpected data; expecting %+v, got %+v", data, resultData) + } + resultChildren, _, err = zk.Children("/bar") + if err != nil { + t.Fatalf("Children returned error: %+v", err) + } + sort.Strings(resultChildren) + if !reflect.DeepEqual(resultChildren, children) { + t.Fatalf("Children returned unexpected names; expecting %+v, got %+v", children, resultChildren) + } +} + +func expectErr(t *testing.T, err error, expected error) { + if err == nil { + t.Fatalf("Get for node that is too large should have returned error!") + } + if err != expected { + t.Fatalf("Get returned wrong error; expecting ErrClosing, got %+v", err) + } +} + +func expectLogMessage(t *testing.T, logger *testLogger, pattern string) { + re := regexp.MustCompile(pattern) + events := logger.Reset() + if len(events) == 0 { + t.Fatalf("Failed to log error; expecting message that matches pattern: %s", pattern) + } + var found []string + for _, e := range events { + if re.Match([]byte(e)) { + found = append(found, e) + } + } + if len(found) == 0 { + t.Fatalf("Failed to log error; expecting message that matches pattern: %s", pattern) + } else if len(found) > 1 { + t.Fatalf("Logged error redundantly %d times:\n%+v", len(found), found) + } +} + +type testLogger struct { + mu sync.Mutex + events []string +} + +func (l *testLogger) Printf(msgFormat string, args ...interface{}) { + msg := fmt.Sprintf(msgFormat, args...) + fmt.Println(msg) + l.mu.Lock() + defer l.mu.Unlock() + l.events = append(l.events, msg) +} + +func (l *testLogger) Reset() []string { + l.mu.Lock() + defer l.mu.Unlock() + ret := l.events + l.events = nil + return ret +} diff --git a/vendor/github.com/sirupsen/logrus/hooks/test/test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test.go deleted file mode 100644 index 62c4845d..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks/test/test.go +++ /dev/null @@ -1,95 +0,0 @@ -// The Test package is used for testing logrus. It is here for backwards -// compatibility from when logrus' organization was upper-case. Please use -// lower-case logrus and the `null` package instead of this one. -package test - -import ( - "io/ioutil" - "sync" - - "github.com/sirupsen/logrus" -) - -// Hook is a hook designed for dealing with logs in test scenarios. -type Hook struct { - // Entries is an array of all entries that have been received by this hook. - // For safe access, use the AllEntries() method, rather than reading this - // value directly. - Entries []*logrus.Entry - mu sync.RWMutex -} - -// NewGlobal installs a test hook for the global logger. -func NewGlobal() *Hook { - - hook := new(Hook) - logrus.AddHook(hook) - - return hook - -} - -// NewLocal installs a test hook for a given local logger. 
-func NewLocal(logger *logrus.Logger) *Hook { - - hook := new(Hook) - logger.Hooks.Add(hook) - - return hook - -} - -// NewNullLogger creates a discarding logger and installs the test hook. -func NewNullLogger() (*logrus.Logger, *Hook) { - - logger := logrus.New() - logger.Out = ioutil.Discard - - return logger, NewLocal(logger) - -} - -func (t *Hook) Fire(e *logrus.Entry) error { - t.mu.Lock() - defer t.mu.Unlock() - t.Entries = append(t.Entries, e) - return nil -} - -func (t *Hook) Levels() []logrus.Level { - return logrus.AllLevels -} - -// LastEntry returns the last entry that was logged or nil. -func (t *Hook) LastEntry() *logrus.Entry { - t.mu.RLock() - defer t.mu.RUnlock() - i := len(t.Entries) - 1 - if i < 0 { - return nil - } - // Make a copy, for safety - e := *t.Entries[i] - return &e -} - -// AllEntries returns all entries that were logged. -func (t *Hook) AllEntries() []*logrus.Entry { - t.mu.RLock() - defer t.mu.RUnlock() - // Make a copy so the returned value won't race with future log requests - entries := make([]*logrus.Entry, len(t.Entries)) - for i, entry := range t.Entries { - // Make a copy, for safety - e := *entry - entries[i] = &e - } - return entries -} - -// Reset removes all Entries from this test hook. -func (t *Hook) Reset() { - t.mu.Lock() - defer t.mu.Unlock() - t.Entries = make([]*logrus.Entry, 0) -} diff --git a/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go deleted file mode 100644 index 3f55cfe3..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package test - -import ( - "testing" - - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" -) - -func TestAllHooks(t *testing.T) { - - assert := assert.New(t) - - logger, hook := NewNullLogger() - assert.Nil(hook.LastEntry()) - assert.Equal(0, len(hook.Entries)) - - logger.Error("Hello error") - assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal("Hello error", hook.LastEntry().Message) - assert.Equal(1, len(hook.Entries)) - - logger.Warn("Hello warning") - assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) - assert.Equal("Hello warning", hook.LastEntry().Message) - assert.Equal(2, len(hook.Entries)) - - hook.Reset() - assert.Nil(hook.LastEntry()) - assert.Equal(0, len(hook.Entries)) - - hook = NewGlobal() - - logrus.Error("Hello error") - assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal("Hello error", hook.LastEntry().Message) - assert.Equal(1, len(hook.Entries)) - -} diff --git a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go deleted file mode 100644 index f481253c..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package test - -import ( - "bytes" - "testing" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" -) - -func TestAgentForward(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - keyring := agent.NewKeyring() - if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil { - t.Fatalf("Error adding key: %s", err) - } - if err := keyring.Add(agent.AddedKey{ - PrivateKey: testPrivateKeys["dsa"], - ConfirmBeforeUse: true, - LifetimeSecs: 3600, - }); err != nil { - t.Fatalf("Error adding key with constraints: %s", err) - } - pub := testPublicKeys["dsa"] - - sess, err := conn.NewSession() - if err != nil { - t.Fatalf("NewSession: %v", err) - } - if err := agent.RequestAgentForwarding(sess); err != nil { - t.Fatalf("RequestAgentForwarding: %v", err) - } - - if err := agent.ForwardToAgent(conn, keyring); err != nil { - t.Fatalf("SetupForwardKeyring: %v", err) - } - out, err := sess.CombinedOutput("ssh-add -L") - if err != nil { - t.Fatalf("running ssh-add: %v, out %s", err, out) - } - key, _, _, _, err := ssh.ParseAuthorizedKey(out) - if err != nil { - t.Fatalf("ParseAuthorizedKey(%q): %v", out, err) - } - - if !bytes.Equal(key.Marshal(), pub.Marshal()) { - t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub)) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/test/cert_test.go b/vendor/golang.org/x/crypto/ssh/test/cert_test.go deleted file mode 100644 index b231dd80..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/cert_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package test - -import ( - "bytes" - "crypto/rand" - "testing" - - "golang.org/x/crypto/ssh" -) - -// Test both logging in with a cert, and also that the certificate presented by an OpenSSH host can be validated correctly -func TestCertLogin(t *testing.T) { - s := newServer(t) - defer s.Shutdown() - - // Use a key different from the default. 
- clientKey := testSigners["dsa"] - caAuthKey := testSigners["ecdsa"] - cert := &ssh.Certificate{ - Key: clientKey.PublicKey(), - ValidPrincipals: []string{username()}, - CertType: ssh.UserCert, - ValidBefore: ssh.CertTimeInfinity, - } - if err := cert.SignCert(rand.Reader, caAuthKey); err != nil { - t.Fatalf("SetSignature: %v", err) - } - - certSigner, err := ssh.NewCertSigner(cert, clientKey) - if err != nil { - t.Fatalf("NewCertSigner: %v", err) - } - - conf := &ssh.ClientConfig{ - User: username(), - HostKeyCallback: (&ssh.CertChecker{ - IsHostAuthority: func(pk ssh.PublicKey, addr string) bool { - return bytes.Equal(pk.Marshal(), testPublicKeys["ca"].Marshal()) - }, - }).CheckHostKey, - } - conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner)) - - for _, test := range []struct { - addr string - succeed bool - }{ - {addr: "host.example.com:22", succeed: true}, - {addr: "host.example.com:10000", succeed: true}, // non-standard port must be OK - {addr: "host.example.com", succeed: false}, // port must be specified - {addr: "host.ex4mple.com:22", succeed: false}, // wrong host - } { - client, err := s.TryDialWithAddr(conf, test.addr) - - // Always close client if opened successfully - if err == nil { - client.Close() - } - - // Now evaluate whether the test failed or passed - if test.succeed { - if err != nil { - t.Fatalf("TryDialWithAddr: %v", err) - } - } else { - if err == nil { - t.Fatalf("TryDialWithAddr, unexpected success") - } - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go deleted file mode 100644 index 091e48cc..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/dial_unix_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !windows - -package test - -// direct-tcpip and direct-streamlocal functional tests - -import ( - "fmt" - "io" - "io/ioutil" - "net" - "strings" - "testing" -) - -type dialTester interface { - TestServerConn(t *testing.T, c net.Conn) - TestClientConn(t *testing.T, c net.Conn) -} - -func testDial(t *testing.T, n, listenAddr string, x dialTester) { - server := newServer(t) - defer server.Shutdown() - sshConn := server.Dial(clientConfig()) - defer sshConn.Close() - - l, err := net.Listen(n, listenAddr) - if err != nil { - t.Fatalf("Listen: %v", err) - } - defer l.Close() - - testData := fmt.Sprintf("hello from %s, %s", n, listenAddr) - go func() { - for { - c, err := l.Accept() - if err != nil { - break - } - x.TestServerConn(t, c) - - io.WriteString(c, testData) - c.Close() - } - }() - - conn, err := sshConn.Dial(n, l.Addr().String()) - if err != nil { - t.Fatalf("Dial: %v", err) - } - x.TestClientConn(t, conn) - defer conn.Close() - b, err := ioutil.ReadAll(conn) - if err != nil { - t.Fatalf("ReadAll: %v", err) - } - t.Logf("got %q", string(b)) - if string(b) != testData { - t.Fatalf("expected %q, got %q", testData, string(b)) - } -} - -type tcpDialTester struct { - listenAddr string -} - -func (x *tcpDialTester) TestServerConn(t *testing.T, c net.Conn) { - host := strings.Split(x.listenAddr, ":")[0] - prefix := host + ":" - if !strings.HasPrefix(c.LocalAddr().String(), prefix) { - t.Fatalf("expected to start with %q, got %q", prefix, c.LocalAddr().String()) - } - if !strings.HasPrefix(c.RemoteAddr().String(), prefix) { - t.Fatalf("expected to start with %q, got %q", prefix, c.RemoteAddr().String()) - } -} - -func (x *tcpDialTester) TestClientConn(t *testing.T, c net.Conn) { - // we use zero addresses. see *Client.Dial. - if c.LocalAddr().String() != "0.0.0.0:0" { - t.Fatalf("expected \"0.0.0.0:0\", got %q", c.LocalAddr().String()) - } - if c.RemoteAddr().String() != "0.0.0.0:0" { - t.Fatalf("expected \"0.0.0.0:0\", got %q", c.RemoteAddr().String()) - } -} - -func TestDialTCP(t *testing.T) { - x := &tcpDialTester{ - listenAddr: "127.0.0.1:0", - } - testDial(t, "tcp", x.listenAddr, x) -} - -type unixDialTester struct { - listenAddr string -} - -func (x *unixDialTester) TestServerConn(t *testing.T, c net.Conn) { - if c.LocalAddr().String() != x.listenAddr { - t.Fatalf("expected %q, got %q", x.listenAddr, c.LocalAddr().String()) - } - if c.RemoteAddr().String() != "@" { - t.Fatalf("expected \"@\", got %q", c.RemoteAddr().String()) - } -} - -func (x *unixDialTester) TestClientConn(t *testing.T, c net.Conn) { - if c.RemoteAddr().String() != x.listenAddr { - t.Fatalf("expected %q, got %q", x.listenAddr, c.RemoteAddr().String()) - } - if c.LocalAddr().String() != "@" { - t.Fatalf("expected \"@\", got %q", c.LocalAddr().String()) - } -} - -func TestDialUnix(t *testing.T) { - addr, cleanup := newTempSocket(t) - defer cleanup() - x := &unixDialTester{ - listenAddr: addr, - } - testDial(t, "unix", x.listenAddr, x) -} diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go deleted file mode 100644 index 3f9b3346..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This package contains integration tests for the -// golang.org/x/crypto/ssh package. 
-package test // import "golang.org/x/crypto/ssh/test" diff --git a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go deleted file mode 100644 index ea819378..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package test - -import ( - "bytes" - "io" - "io/ioutil" - "math/rand" - "net" - "testing" - "time" -) - -type closeWriter interface { - CloseWrite() error -} - -func testPortForward(t *testing.T, n, listenAddr string) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - sshListener, err := conn.Listen(n, listenAddr) - if err != nil { - t.Fatal(err) - } - - go func() { - sshConn, err := sshListener.Accept() - if err != nil { - t.Fatalf("listen.Accept failed: %v", err) - } - - _, err = io.Copy(sshConn, sshConn) - if err != nil && err != io.EOF { - t.Fatalf("ssh client copy: %v", err) - } - sshConn.Close() - }() - - forwardedAddr := sshListener.Addr().String() - netConn, err := net.Dial(n, forwardedAddr) - if err != nil { - t.Fatalf("net dial failed: %v", err) - } - - readChan := make(chan []byte) - go func() { - data, _ := ioutil.ReadAll(netConn) - readChan <- data - }() - - // Invent some data. - data := make([]byte, 100*1000) - for i := range data { - data[i] = byte(i % 255) - } - - var sent []byte - for len(sent) < 1000*1000 { - // Send random sized chunks - m := rand.Intn(len(data)) - n, err := netConn.Write(data[:m]) - if err != nil { - break - } - sent = append(sent, data[:n]...) - } - if err := netConn.(closeWriter).CloseWrite(); err != nil { - t.Errorf("netConn.CloseWrite: %v", err) - } - - read := <-readChan - - if len(sent) != len(read) { - t.Fatalf("got %d bytes, want %d", len(read), len(sent)) - } - if bytes.Compare(sent, read) != 0 { - t.Fatalf("read back data does not match") - } - - if err := sshListener.Close(); err != nil { - t.Fatalf("sshListener.Close: %v", err) - } - - // Check that the forward disappeared. - netConn, err = net.Dial(n, forwardedAddr) - if err == nil { - netConn.Close() - t.Errorf("still listening to %s after closing", forwardedAddr) - } -} - -func TestPortForwardTCP(t *testing.T) { - testPortForward(t, "tcp", "localhost:0") -} - -func TestPortForwardUnix(t *testing.T) { - addr, cleanup := newTempSocket(t) - defer cleanup() - testPortForward(t, "unix", addr) -} - -func testAcceptClose(t *testing.T, n, listenAddr string) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - - sshListener, err := conn.Listen(n, listenAddr) - if err != nil { - t.Fatal(err) - } - - quit := make(chan error, 1) - go func() { - for { - c, err := sshListener.Accept() - if err != nil { - quit <- err - break - } - c.Close() - } - }() - sshListener.Close() - - select { - case <-time.After(1 * time.Second): - t.Errorf("timeout: listener did not close.") - case err := <-quit: - t.Logf("quit as expected (error %v)", err) - } -} - -func TestAcceptCloseTCP(t *testing.T) { - testAcceptClose(t, "tcp", "localhost:0") -} - -func TestAcceptCloseUnix(t *testing.T) { - addr, cleanup := newTempSocket(t) - defer cleanup() - testAcceptClose(t, "unix", addr) -} - -// Check that listeners exit if the underlying client transport dies. 
-func testPortForwardConnectionClose(t *testing.T, n, listenAddr string) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - - sshListener, err := conn.Listen(n, listenAddr) - if err != nil { - t.Fatal(err) - } - - quit := make(chan error, 1) - go func() { - for { - c, err := sshListener.Accept() - if err != nil { - quit <- err - break - } - c.Close() - } - }() - - // It would be even nicer if we closed the server side, but it - // is more involved as the fd for that side is dup()ed. - server.clientConn.Close() - - select { - case <-time.After(1 * time.Second): - t.Errorf("timeout: listener did not close.") - case err := <-quit: - t.Logf("quit as expected (error %v)", err) - } -} - -func TestPortForwardConnectionCloseTCP(t *testing.T) { - testPortForwardConnectionClose(t, "tcp", "localhost:0") -} - -func TestPortForwardConnectionCloseUnix(t *testing.T) { - addr, cleanup := newTempSocket(t) - defer cleanup() - testPortForwardConnectionClose(t, "unix", addr) -} diff --git a/vendor/golang.org/x/crypto/ssh/test/session_test.go b/vendor/golang.org/x/crypto/ssh/test/session_test.go deleted file mode 100644 index 8238d9d9..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/session_test.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !windows - -package test - -// Session functional tests. - -import ( - "bytes" - "errors" - "io" - "strings" - "testing" - - "golang.org/x/crypto/ssh" -) - -func TestRunCommandSuccess(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - err = session.Run("true") - if err != nil { - t.Fatalf("session failed: %v", err) - } -} - -func TestHostKeyCheck(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - - conf := clientConfig() - hostDB := hostKeyDB() - conf.HostKeyCallback = hostDB.Check - - // change the keys. 
- hostDB.keys[ssh.KeyAlgoRSA][25]++ - hostDB.keys[ssh.KeyAlgoDSA][25]++ - hostDB.keys[ssh.KeyAlgoECDSA256][25]++ - - conn, err := server.TryDial(conf) - if err == nil { - conn.Close() - t.Fatalf("dial should have failed.") - } else if !strings.Contains(err.Error(), "host key mismatch") { - t.Fatalf("'host key mismatch' not found in %v", err) - } -} - -func TestRunCommandStdin(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - - r, w := io.Pipe() - defer r.Close() - defer w.Close() - session.Stdin = r - - err = session.Run("true") - if err != nil { - t.Fatalf("session failed: %v", err) - } -} - -func TestRunCommandStdinError(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - - r, w := io.Pipe() - defer r.Close() - session.Stdin = r - pipeErr := errors.New("closing write end of pipe") - w.CloseWithError(pipeErr) - - err = session.Run("true") - if err != pipeErr { - t.Fatalf("expected %v, found %v", pipeErr, err) - } -} - -func TestRunCommandFailed(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - err = session.Run(`bash -c "kill -9 $$"`) - if err == nil { - t.Fatalf("session succeeded: %v", err) - } -} - -func TestRunCommandWeClosed(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - err = session.Shell() - if err != nil { - t.Fatalf("shell failed: %v", err) - } - err = session.Close() - if err != nil { - t.Fatalf("shell failed: %v", err) - } -} - -func TestFuncLargeRead(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("unable to create new session: %s", err) - } - - stdout, err := session.StdoutPipe() - if err != nil { - t.Fatalf("unable to acquire stdout pipe: %s", err) - } - - err = session.Start("dd if=/dev/urandom bs=2048 count=1024") - if err != nil { - t.Fatalf("unable to execute remote command: %s", err) - } - - buf := new(bytes.Buffer) - n, err := io.Copy(buf, stdout) - if err != nil { - t.Fatalf("error reading from remote stdout: %s", err) - } - - if n != 2048*1024 { - t.Fatalf("Expected %d bytes but read only %d from remote command", 2048, n) - } -} - -func TestKeyChange(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conf := clientConfig() - hostDB := hostKeyDB() - conf.HostKeyCallback = hostDB.Check - conf.RekeyThreshold = 1024 - conn := server.Dial(conf) - defer conn.Close() - - for i := 0; i < 4; i++ { - session, err := conn.NewSession() - if err != nil { - t.Fatalf("unable to create new session: %s", err) - } - - stdout, err := session.StdoutPipe() - if err != nil { - t.Fatalf("unable to acquire stdout pipe: %s", err) - } - - err = session.Start("dd if=/dev/urandom bs=1024 count=1") - if err != nil { - t.Fatalf("unable to execute remote command: %s", err) - 
} - buf := new(bytes.Buffer) - n, err := io.Copy(buf, stdout) - if err != nil { - t.Fatalf("error reading from remote stdout: %s", err) - } - - want := int64(1024) - if n != want { - t.Fatalf("Expected %d bytes but read only %d from remote command", want, n) - } - } - - if changes := hostDB.checkCount; changes < 4 { - t.Errorf("got %d key changes, want 4", changes) - } -} - -func TestInvalidTerminalMode(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - - if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil { - t.Fatalf("req-pty failed: successful request with invalid mode") - } -} - -func TestValidTerminalMode(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - - stdout, err := session.StdoutPipe() - if err != nil { - t.Fatalf("unable to acquire stdout pipe: %s", err) - } - - stdin, err := session.StdinPipe() - if err != nil { - t.Fatalf("unable to acquire stdin pipe: %s", err) - } - - tm := ssh.TerminalModes{ssh.ECHO: 0} - if err = session.RequestPty("xterm", 80, 40, tm); err != nil { - t.Fatalf("req-pty failed: %s", err) - } - - err = session.Shell() - if err != nil { - t.Fatalf("session failed: %s", err) - } - - stdin.Write([]byte("stty -a && exit\n")) - - var buf bytes.Buffer - if _, err := io.Copy(&buf, stdout); err != nil { - t.Fatalf("reading failed: %s", err) - } - - if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") { - t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput) - } -} - -func TestWindowChange(t *testing.T) { - server := newServer(t) - defer server.Shutdown() - conn := server.Dial(clientConfig()) - defer conn.Close() - - session, err := conn.NewSession() - if err != nil { - t.Fatalf("session failed: %v", err) - } - defer session.Close() - - stdout, err := session.StdoutPipe() - if err != nil { - t.Fatalf("unable to acquire stdout pipe: %s", err) - } - - stdin, err := session.StdinPipe() - if err != nil { - t.Fatalf("unable to acquire stdin pipe: %s", err) - } - - tm := ssh.TerminalModes{ssh.ECHO: 0} - if err = session.RequestPty("xterm", 80, 40, tm); err != nil { - t.Fatalf("req-pty failed: %s", err) - } - - if err := session.WindowChange(100, 100); err != nil { - t.Fatalf("window-change failed: %s", err) - } - - err = session.Shell() - if err != nil { - t.Fatalf("session failed: %s", err) - } - - stdin.Write([]byte("stty size && exit\n")) - - var buf bytes.Buffer - if _, err := io.Copy(&buf, stdout); err != nil { - t.Fatalf("reading failed: %s", err) - } - - if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "100 100") { - t.Fatalf("terminal WindowChange failure: expected \"100 100\" stty output, got %s", sttyOutput) - } -} - -func TestCiphers(t *testing.T) { - var config ssh.Config - config.SetDefaults() - cipherOrder := config.Ciphers - // These ciphers will not be tested when commented out in cipher.go it will - // fallback to the next available as per line 292. 
- cipherOrder = append(cipherOrder, "aes128-cbc", "3des-cbc") - - for _, ciph := range cipherOrder { - server := newServer(t) - defer server.Shutdown() - conf := clientConfig() - conf.Ciphers = []string{ciph} - // Don't fail if sshd doesn't have the cipher. - conf.Ciphers = append(conf.Ciphers, cipherOrder...) - conn, err := server.TryDial(conf) - if err == nil { - conn.Close() - } else { - t.Fatalf("failed for cipher %q", ciph) - } - } -} - -func TestMACs(t *testing.T) { - var config ssh.Config - config.SetDefaults() - macOrder := config.MACs - - for _, mac := range macOrder { - server := newServer(t) - defer server.Shutdown() - conf := clientConfig() - conf.MACs = []string{mac} - // Don't fail if sshd doesn't have the MAC. - conf.MACs = append(conf.MACs, macOrder...) - if conn, err := server.TryDial(conf); err == nil { - conn.Close() - } else { - t.Fatalf("failed for MAC %q", mac) - } - } -} - -func TestKeyExchanges(t *testing.T) { - var config ssh.Config - config.SetDefaults() - kexOrder := config.KeyExchanges - for _, kex := range kexOrder { - server := newServer(t) - defer server.Shutdown() - conf := clientConfig() - // Don't fail if sshd doesn't have the kex. - conf.KeyExchanges = append([]string{kex}, kexOrder...) - conn, err := server.TryDial(conf) - if err == nil { - conn.Close() - } else { - t.Errorf("failed for kex %q", kex) - } - } -} - -func TestClientAuthAlgorithms(t *testing.T) { - for _, key := range []string{ - "rsa", - "dsa", - "ecdsa", - "ed25519", - } { - server := newServer(t) - conf := clientConfig() - conf.SetDefaults() - conf.Auth = []ssh.AuthMethod{ - ssh.PublicKeys(testSigners[key]), - } - - conn, err := server.TryDial(conf) - if err == nil { - conn.Close() - } else { - t.Errorf("failed for key %q", key) - } - - server.Shutdown() - } -} diff --git a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go deleted file mode 100644 index e673536a..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd plan9 - -package test - -// functional test harness for unix. - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "os/exec" - "os/user" - "path/filepath" - "testing" - "text/template" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/testdata" -) - -const sshd_config = ` -Protocol 2 -HostKey {{.Dir}}/id_rsa -HostKey {{.Dir}}/id_dsa -HostKey {{.Dir}}/id_ecdsa -HostCertificate {{.Dir}}/id_rsa-cert.pub -Pidfile {{.Dir}}/sshd.pid -#UsePrivilegeSeparation no -KeyRegenerationInterval 3600 -ServerKeyBits 768 -SyslogFacility AUTH -LogLevel DEBUG2 -LoginGraceTime 120 -PermitRootLogin no -StrictModes no -RSAAuthentication yes -PubkeyAuthentication yes -AuthorizedKeysFile {{.Dir}}/authorized_keys -TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub -IgnoreRhosts yes -RhostsRSAAuthentication no -HostbasedAuthentication no -PubkeyAcceptedKeyTypes=* -` - -var configTmpl = template.Must(template.New("").Parse(sshd_config)) - -type server struct { - t *testing.T - cleanup func() // executed during Shutdown - configfile string - cmd *exec.Cmd - output bytes.Buffer // holds stderr from sshd process - - // Client half of the network connection. 
- clientConn net.Conn -} - -func username() string { - var username string - if user, err := user.Current(); err == nil { - username = user.Username - } else { - // user.Current() currently requires cgo. If an error is - // returned attempt to get the username from the environment. - log.Printf("user.Current: %v; falling back on $USER", err) - username = os.Getenv("USER") - } - if username == "" { - panic("Unable to get username") - } - return username -} - -type storedHostKey struct { - // keys map from an algorithm string to binary key data. - keys map[string][]byte - - // checkCount counts the Check calls. Used for testing - // rekeying. - checkCount int -} - -func (k *storedHostKey) Add(key ssh.PublicKey) { - if k.keys == nil { - k.keys = map[string][]byte{} - } - k.keys[key.Type()] = key.Marshal() -} - -func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error { - k.checkCount++ - algo := key.Type() - - if k.keys == nil || bytes.Compare(key.Marshal(), k.keys[algo]) != 0 { - return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo]) - } - return nil -} - -func hostKeyDB() *storedHostKey { - keyChecker := &storedHostKey{} - keyChecker.Add(testPublicKeys["ecdsa"]) - keyChecker.Add(testPublicKeys["rsa"]) - keyChecker.Add(testPublicKeys["dsa"]) - return keyChecker -} - -func clientConfig() *ssh.ClientConfig { - config := &ssh.ClientConfig{ - User: username(), - Auth: []ssh.AuthMethod{ - ssh.PublicKeys(testSigners["user"]), - }, - HostKeyCallback: hostKeyDB().Check, - HostKeyAlgorithms: []string{ // by default, don't allow certs as this affects the hostKeyDB checker - ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521, - ssh.KeyAlgoRSA, ssh.KeyAlgoDSA, - ssh.KeyAlgoED25519, - }, - } - return config -} - -// unixConnection creates two halves of a connected net.UnixConn. It -// is used for connecting the Go SSH client with sshd without opening -// ports. -func unixConnection() (*net.UnixConn, *net.UnixConn, error) { - dir, err := ioutil.TempDir("", "unixConnection") - if err != nil { - return nil, nil, err - } - defer os.Remove(dir) - - addr := filepath.Join(dir, "ssh") - listener, err := net.Listen("unix", addr) - if err != nil { - return nil, nil, err - } - defer listener.Close() - c1, err := net.Dial("unix", addr) - if err != nil { - return nil, nil, err - } - - c2, err := listener.Accept() - if err != nil { - c1.Close() - return nil, nil, err - } - - return c1.(*net.UnixConn), c2.(*net.UnixConn), nil -} - -func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) { - return s.TryDialWithAddr(config, "") -} - -// addr is the user specified host:port. 
While we don't actually dial it, -// we need to know this for host key matching -func (s *server) TryDialWithAddr(config *ssh.ClientConfig, addr string) (*ssh.Client, error) { - sshd, err := exec.LookPath("sshd") - if err != nil { - s.t.Skipf("skipping test: %v", err) - } - - c1, c2, err := unixConnection() - if err != nil { - s.t.Fatalf("unixConnection: %v", err) - } - - s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e") - f, err := c2.File() - if err != nil { - s.t.Fatalf("UnixConn.File: %v", err) - } - defer f.Close() - s.cmd.Stdin = f - s.cmd.Stdout = f - s.cmd.Stderr = &s.output - if err := s.cmd.Start(); err != nil { - s.t.Fail() - s.Shutdown() - s.t.Fatalf("s.cmd.Start: %v", err) - } - s.clientConn = c1 - conn, chans, reqs, err := ssh.NewClientConn(c1, addr, config) - if err != nil { - return nil, err - } - return ssh.NewClient(conn, chans, reqs), nil -} - -func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client { - conn, err := s.TryDial(config) - if err != nil { - s.t.Fail() - s.Shutdown() - s.t.Fatalf("ssh.Client: %v", err) - } - return conn -} - -func (s *server) Shutdown() { - if s.cmd != nil && s.cmd.Process != nil { - // Don't check for errors; if it fails it's most - // likely "os: process already finished", and we don't - // care about that. Use os.Interrupt, so child - // processes are killed too. - s.cmd.Process.Signal(os.Interrupt) - s.cmd.Wait() - } - if s.t.Failed() { - // log any output from sshd process - s.t.Logf("sshd: %s", s.output.String()) - } - s.cleanup() -} - -func writeFile(path string, contents []byte) { - f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600) - if err != nil { - panic(err) - } - defer f.Close() - if _, err := f.Write(contents); err != nil { - panic(err) - } -} - -// newServer returns a new mock ssh server. -func newServer(t *testing.T) *server { - if testing.Short() { - t.Skip("skipping test due to -short") - } - dir, err := ioutil.TempDir("", "sshtest") - if err != nil { - t.Fatal(err) - } - f, err := os.Create(filepath.Join(dir, "sshd_config")) - if err != nil { - t.Fatal(err) - } - err = configTmpl.Execute(f, map[string]string{ - "Dir": dir, - }) - if err != nil { - t.Fatal(err) - } - f.Close() - - for k, v := range testdata.PEMBytes { - filename := "id_" + k - writeFile(filepath.Join(dir, filename), v) - writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k])) - } - - for k, v := range testdata.SSHCertificates { - filename := "id_" + k + "-cert.pub" - writeFile(filepath.Join(dir, filename), v) - } - - var authkeys bytes.Buffer - for k, _ := range testdata.PEMBytes { - authkeys.Write(ssh.MarshalAuthorizedKey(testPublicKeys[k])) - } - writeFile(filepath.Join(dir, "authorized_keys"), authkeys.Bytes()) - - return &server{ - t: t, - configfile: f.Name(), - cleanup: func() { - if err := os.RemoveAll(dir); err != nil { - t.Error(err) - } - }, - } -} - -func newTempSocket(t *testing.T) (string, func()) { - dir, err := ioutil.TempDir("", "socket") - if err != nil { - t.Fatal(err) - } - deferFunc := func() { os.RemoveAll(dir) } - addr := filepath.Join(dir, "sock") - return addr, deferFunc -} diff --git a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go deleted file mode 100644 index a053f67e..00000000 --- a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: -// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three -// instances. - -package test - -import ( - "crypto/rand" - "fmt" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/testdata" -) - -var ( - testPrivateKeys map[string]interface{} - testSigners map[string]ssh.Signer - testPublicKeys map[string]ssh.PublicKey -) - -func init() { - var err error - - n := len(testdata.PEMBytes) - testPrivateKeys = make(map[string]interface{}, n) - testSigners = make(map[string]ssh.Signer, n) - testPublicKeys = make(map[string]ssh.PublicKey, n) - for t, k := range testdata.PEMBytes { - testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k) - if err != nil { - panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) - } - testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t]) - if err != nil { - panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) - } - testPublicKeys[t] = testSigners[t].PublicKey() - } - - // Create a cert and sign it for use in tests. - testCert := &ssh.Certificate{ - Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil - ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage - ValidAfter: 0, // unix epoch - ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time. - Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil - Key: testPublicKeys["ecdsa"], - SignatureKey: testPublicKeys["rsa"], - Permissions: ssh.Permissions{ - CriticalOptions: map[string]string{}, - Extensions: map[string]string{}, - }, - } - testCert.SignCert(rand.Reader, testSigners["rsa"]) - testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] - testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"]) - if err != nil { - panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) - } -}