diff --git a/health_check.md b/health_check.md
new file mode 100644
index 00000000..396ac3b3
--- /dev/null
+++ b/health_check.md
@@ -0,0 +1,88 @@
+# Etcd Cluster Health Check
+
+## Description
+
+Implements a passive health check mechanism: when a connection, read, or write fails, the failure is recorded against that endpoint.
+
+## Methods
+
+* [init](#init)
+* [report_failure](#report_failure)
+* [get_target_status](#get_target_status)
+
+### init
+
+`syntax: health_check, err = health_check.init(params)`
+
+Initializes the health check object, overriding the default params with the given ones. In case of failure, returns `nil` and a string describing the error.
+
+### report_failure
+
+`syntax: health_check.report_failure(etcd_host)`
+
+Reports a health failure, which counts toward the number of failures required to mark a target as unavailable.
+
+### get_target_status
+
+`syntax: healthy, err = health_check.get_target_status(etcd_host)`
+
+Gets the current health status of the target: returns `true` if the endpoint is healthy, or `false` if it has been marked unhealthy.
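+Below is a minimal sketch of how the two methods work together once `init`
+has succeeded (the endpoint URL is only an illustration):
+
+```lua
+local health_check = require("resty.etcd.health_check")
+
+-- suppose a request to this endpoint has just failed
+local endpoint = "http://127.0.0.1:12379"
+health_check.report_failure(endpoint)
+
+-- once max_fails failures accumulate within fail_timeout,
+-- the endpoint is reported as unhealthy
+if not health_check.get_target_status(endpoint) then
+    ngx.log(ngx.WARN, endpoint, " is unhealthy")
+end
+```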
+## Config
+
+| name         | Type    | Requirement | Default | Description                                                  |
+| ------------ | ------- | ----------- | ------- | ------------------------------------------------------------ |
+| shm_name     | string  | required    |         | the name of the `lua_shared_dict` declared in the nginx configuration, used to store the health status of endpoints. |
+| fail_timeout | integer | optional    | 10s     | sets the time window during which the specified number of failed attempts to communicate with the endpoint must happen for the endpoint to be marked unavailable, and also sets how long the endpoint stays marked unavailable. |
+| max_fails    | integer | optional    | 1       | sets the number of failed attempts that must occur during the `fail_timeout` period for the endpoint to be marked unavailable. |
+
+Lua example:
+
+```lua
+local health_check, err = require("resty.etcd.health_check").init({
+    shm_name = "healthcheck_shm",
+    fail_timeout = 10,
+    max_fails = 1,
+})
+```
+
+Within a `fail_timeout` window, if `max_fails` failures occur, the endpoint is marked as unhealthy, and an unhealthy endpoint will not be chosen for connections for the following `fail_timeout` period.
+
+The health check mechanism switches to another endpoint only when the previously chosen endpoint is marked as unhealthy.
+
+The failure counter and health status of each etcd endpoint are shared across workers and by different etcd clients.
+
+Also note that `fail_timeout` and `max_fails` cannot be changed once the health check has been created.
"/v2/stats/leader", nil, self.timeout) end function _M.stats_self(self) - return _request_uri(self, "GET", - choose_endpoint(self).http_host .. "/v2/stats/self", + local endpoint = choose_endpoint(self) + return _request_uri(self, endpoint, "GET", + endpoint.http_host .. "/v2/stats/self", nil, self.timeout) end function _M.stats_store(self) - return _request_uri(self, "GET", - choose_endpoint(self).http_host .. "/v2/stats/store", + local endpoint = choose_endpoint(self) + return _request_uri(self, endpoint, "GET", + endpoint.http_host .. "/v2/stats/store", nil, self.timeout) end diff --git a/rockspec/lua-resty-etcd-master-0.1-0.rockspec b/rockspec/lua-resty-etcd-master-0.1-0.rockspec index 37b94df0..22b78776 100644 --- a/rockspec/lua-resty-etcd-master-0.1-0.rockspec +++ b/rockspec/lua-resty-etcd-master-0.1-0.rockspec @@ -26,5 +26,6 @@ build = { ["resty.etcd.utils"] = "lib/resty/etcd/utils.lua", ["resty.etcd.serializers.json"] = "lib/resty/etcd/serializers/json.lua", ["resty.etcd.serializers.raw"] = "lib/resty/etcd/serializers/raw.lua", + ["resty.etcd.health_check"] = "lib/resty/etcd/health_check.lua", } } diff --git a/t/v3/health_check.t b/t/v3/health_check.t new file mode 100644 index 00000000..504849e9 --- /dev/null +++ b/t/v3/health_check.t @@ -0,0 +1,438 @@ +use Test::Nginx::Socket::Lua; + +log_level('info'); +no_long_string(); +repeat_each(1); +workers(2); + +my $etcd_version = `etcd --version`; +if ($etcd_version =~ /^etcd Version: 2/ || $etcd_version =~ /^etcd Version: 3.1./ || $etcd_version =~ /^etcd Version: 3.2./) { + plan(skip_all => "etcd is too old, skip v3 protocol"); +} else { + my $enable_tls = $ENV{ETCD_ENABLE_TLS}; + if ($enable_tls eq "TRUE") { + plan(skip_all => "skip test cases with auth when TLS is enabled"); + } else { + plan 'no_plan'; + } +} + +our $HttpConfig = <<'_EOC_'; + lua_socket_log_errors off; + lua_package_path 'lib/?.lua;/usr/local/share/lua/5.3/?.lua;/usr/share/lua/5.1/?.lua;;'; + lua_shared_dict etcd_cluster_health_check 8m; + init_by_lua_block { + local cjson = require("cjson.safe") + + function check_res(data, err, val, status) + if err then + ngx.say("err: ", err) + ngx.exit(200) + end + + if val then + if data.body.kvs==nil then + ngx.exit(404) + end + if data.body.kvs and val ~= data.body.kvs[1].value then + ngx.say("failed to check value") + ngx.log(ngx.ERR, "failed to check value, got: ", data.body.kvs[1].value, + ", expect: ", val) + ngx.exit(200) + else + ngx.say("checked val as expect: ", val) + end + end + + if status and status ~= data.status then + ngx.exit(data.status) + end + end + } +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 5, + max_fails = 3, + }) + assert( err == nil) + assert( health_check.conf ~= nil) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:12379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + check_res(etcd, err) + + ngx.say("done") + } + } +--- request +GET /t +--- no_error_log +[error] +--- response_body +done + + + +=== TEST 2: default configuration +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + }) + 
ngx.say(health_check.conf.max_fails) + ngx.say(health_check.conf.fail_timeout) + } + } +--- request +GET /t +--- response_body +1 +10 +--- no_error_log +[error] + + + +=== TEST 3: bad shm_name +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "error_shm_name", + }) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +failed to get ngx.shared dict: error_shm_name +--- no_error_log +[error] + + + +=== TEST 4: trigger unhealthy +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 10, + max_fails = 1, + }) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:42379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + local res, err = etcd:set("/trigger_unhealthy", { a='abc'}) + ngx.say("done") + } + } +--- request +GET /t +--- error_log eval +qr/update endpoint: http:\/\/127.0.0.1:42379 to unhealthy/ +--- response_body +done + + + +=== TEST 5: fault count +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 5, + max_fails = 3, + }) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:42379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + etcd:set("/fault_count", { a='abc'}) + etcd:set("/fault_count", { a='abc'}) + local fails, err = ngx.shared["etcd_cluster_health_check"]:get("http://127.0.0.1:42379") + if err then + ngx.say(err) + end + ngx.say(fails) + } + } +--- request +GET /t +--- response_body +2 +--- no_error_log +[error] + + + +=== TEST 6: check endpoint is healthy +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 3, + max_fails = 1, + }) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:42379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + etcd:set("/get_target_status", { a='abc'}) + + local healthy = health_check.get_target_status("http://127.0.0.1:42379") + ngx.say(healthy) + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +[error] + + + +=== TEST 7: make sure `fail_timeout` works +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 2, + max_fails = 1, + }) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:42379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + local res, err = etcd:set("/fail_timeout", "http://127.0.0.1:42379") -- trigger http://127.0.0.1:42379 to unhealthy + + res, err = etcd:set("/fail_timeout", "http://127.0.0.1:22379") -- choose http://127.0.0.1:22379 to set value + res, err = 
etcd:get("/fail_timeout") + assert(res.body.kvs[1].value == "http://127.0.0.1:22379") + + ngx.sleep(2) + + res, err = etcd:set("/fail_timeout", "http://127.0.0.1:42379") -- choose http://127.0.0.1:42379 to set value + res, err = etcd:get("/fail_timeout") + assert(res.body.kvs[1].value == "http://127.0.0.1:22379") + + ngx.say("done") + } + } +--- request +GET /t +--- timeout: 5 +--- response_body +done +--- no_error_log +[error] + + + +=== TEST 8: has no healthy etcd endpoint, follow old style +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 3, + max_fails = 1, + }) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:12379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + health_check.report_failure("http://127.0.0.1:12379") + health_check.report_failure("http://127.0.0.1:22379") + health_check.report_failure("http://127.0.0.1:32379") + + local res, err = etcd:set("/no_healthy_endpoint", "hello") + check_res(etcd, err) + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log eval +qr/has no healthy endpoint/ + + + +=== TEST 9: `health_check` shared by different etcd clients +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 3, + max_fails = 2, + }) + + local etcd1, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:42379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + local etcd2, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:42379", + "http://127.0.0.1:22379", + "http://127.0.0.1:32379", + }, + user = 'root', + password = 'abc123', + }) + + assert(tostring(etcd1) ~= tostring(etcd2)) + etcd1:set("/etcd1", "hello") + etcd2:set("/etcd2", "hello") + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log eval +qr/update endpoint: http:\/\/127.0.0.1:42379 to unhealthy/ + + + +=== TEST 10: mock etcd error and report fault +--- http_config eval: $::HttpConfig +--- config + location /v3/auth/authenticate { + content_by_lua_block { -- mock normal authenticate response + ngx.print([[{ + body = '{"header":{"cluster_id":"17237436991929493444","member_id":"9372538179322589801","revision":"40","raft_term":"633"},"token":"KicnFPYazDaiMHBG.74"}', + reason = "OK", + status = 200 + }]]) + } + } + + location /v3/kv/put { + content_by_lua_block { -- mock abnormal put key response + ngx.print([[{ + body = '{"error":"etcdserver: request timed out","message":"etcdserver: request timed out","code":14}', + reason = "Service Unavailable", + status = 503, + }]]) + } + } + + location /t { + content_by_lua_block { + local health_check, err = require "resty.etcd.health_check" .init({ + shm_name = "etcd_cluster_health_check", + fail_timeout = 10, + max_fails = 1, + }) + + local etcd, err = require "resty.etcd" .new({ + protocol = "v3", + http_host = { + "http://127.0.0.1:12379", + }, + user = 'root', + password = 'abc123', + }) + + etcd.endpoints[1].full_prefix="http://localhost:1984/v3" -- replace the endpoint with mock + 
etcd.endpoints[1].http_host="http://localhost:1984" + local res, err = etcd:set("/etcd_error", "hello") + local fails, err = ngx.shared["etcd_cluster_health_check"]:get("http://localhost:1984") + ngx.say(fails) + } + } +--- request +GET /t +--- response_body +1 +--- error_log eval +qr/update endpoint: http:\/\/localhost:1984 to unhealthy/