Skip to content

Commit

Permalink
wip: passive health checks
Browse files Browse the repository at this point in the history
Signed-off-by: Roman Zavodskikh <roman.zavodskikh@zalando.de>
  • Loading branch information
Roman Zavodskikh committed Feb 14, 2024
1 parent 6cf76dc commit 19bbbe0
Show file tree
Hide file tree
Showing 9 changed files with 405 additions and 20 deletions.
8 changes: 8 additions & 0 deletions config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,9 @@ type Config struct {
OpenPolicyAgentEnvoyMetadata string `yaml:"open-policy-agent-envoy-metadata"`
OpenPolicyAgentCleanerInterval time.Duration `yaml:"open-policy-agent-cleaner-interval"`
OpenPolicyAgentStartupTimeout time.Duration `yaml:"open-policy-agent-startup-timeout"`

// Passive Health Checks
PassiveHealthCheck mapFlags `yaml:"passive-health-check"`
}

const (
Expand Down Expand Up @@ -567,6 +570,9 @@ func NewConfig() *Config {
flag.Var(cfg.LuaModules, "lua-modules", "comma separated list of lua filter modules. Use <module>.<symbol> to selectively enable module symbols, for example: package,base._G,base.print,json")
flag.Var(cfg.LuaSources, "lua-sources", `comma separated list of lua input types for the lua() filter. Valid sources "", "file", "inline", "file,inline" and "none". Use "file" to only allow lua file references in lua filter. Default "" is the same as "file","inline". Use "none" to disable lua filters.`)

// Passive Health Checks
flag.Var(&cfg.PassiveHealthCheck, "passive-health-check", "sets the parameters for passive health check feature")

cfg.flags = flag
return cfg
}
Expand Down Expand Up @@ -906,6 +912,8 @@ func (c *Config) ToOptions() skipper.Options {
OpenPolicyAgentEnvoyMetadata: c.OpenPolicyAgentEnvoyMetadata,
OpenPolicyAgentCleanerInterval: c.OpenPolicyAgentCleanerInterval,
OpenPolicyAgentStartupTimeout: c.OpenPolicyAgentStartupTimeout,

PassiveHealthCheck: c.PassiveHealthCheck.values,
}
for _, rcci := range c.CloneRoute {
eskipClone := eskip.NewClone(rcci.Reg, rcci.Repl)
Expand Down
32 changes: 32 additions & 0 deletions docs/operation/operation.md
Original file line number Diff line number Diff line change
Expand Up @@ -893,6 +893,38 @@ to get the results paginated or getting all of them at the same time.
curl localhost:9911/routes?offset=200&limit=100
```

## Passive health check (*experimental*)

Skipper has an option to automatically detect and mitigate faulty backend endpoints; this feature is called
Passive Health Check (PHC).

PHC works the following way: the entire uptime is divided into chunks of `period`. For every period, Skipper
counts the total number of requests and the number of failed requests for each endpoint. While the next period is
going on, Skipper looks at the previous period, and if for a given endpoint:
+ the number of requests in the previous period is more than `min-requests`
AND
+ the ratio of failed requests is more than `min-failed`
then Skipper will send a reduced number of requests to that endpoint compared to the amount sent without PHC
(the larger `requests-discount` and the failed-request ratio of the previous period are, the stronger the reduction is,
but it never reduces lower than `min-requests`).

Having this, we expect fewer requests to fail, because many of them will be sent to endpoints that appear to be healthy instead.

To enable this feature, you need to provide the `-passive-health-check` option with all of the aforementioned parameters
(`period`, `min-requests`, `min-failed`, `requests-discount`) defined,
for instance: `-passive-health-check=period=1m,min-requests=10,min-failed=0.2,requests-discount=0.9`.

You need to define all parameters yourself; there are no defaults, and Skipper will not run if the PHC parameters are passed only partially.

However, Skipper will run without this feature, if no `-passive-health-check` is provided at all.

The parameters of `-passive-health-check` option are:
+ `period=<duration>` - the duration of stats reset period
+ `min-requests=<int>` - the minimum number of requests per minute per backend endpoint required to activate PHC for this endpoint
+ `min-failed=<float more than 0 and less than 1>` - the minimum ratio of failed requests for every endpoint to make PHC
reduce amount of requests to this endpoint
+ `requests-discount=<float more than 0 and less than 1>` - the discount multiplier which defines how strongly the amount of requests
sent to endpoints considered unhealthy is reduced

## Memory consumption

While Skipper is generally not memory bound, some features may require
Expand Down
6 changes: 6 additions & 0 deletions metricsinit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,12 @@ func TestInitOrderAndDefault(t *testing.T) {
SwarmRedisURLs: []string{fmt.Sprintf("localhost:%d", redisPort)},
EnableRatelimiters: true,
SwarmRedisConnMetricsInterval: ringMetricsUpdatePeriod,
PassiveHealthCheck: map[string]string{
"period": "1m",
"min-requests": "10",
"min-failed": "0.2",
"requests-discount": "0.9",
},
}

tornDown := make(chan struct{})
Expand Down
34 changes: 34 additions & 0 deletions proxy/healthy_endpoints.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
package proxy

import (
"math/rand"

"github.com/zalando/skipper/routing"
)

// healthyEndpoints probabilistically filters backend endpoints based on the
// passive health check (PHC) state kept in the endpoint registry. A nil
// *healthyEndpoints disables filtering (see filterHealthyEndpoints).
type healthyEndpoints struct {
	// rnd supplies the random draw used for the per-request drop decision.
	// NOTE(review): presumably this must be a concurrency-safe source
	// (e.g. a locked source), since proxies serve requests concurrently — confirm at the call site.
	rnd *rand.Rand
	// endpointRegistry holds per-endpoint PHC metrics; not read directly in
	// this file's visible code, but kept for access to registry state.
	endpointRegistry *routing.EndpointRegistry
}

// filterHealthyEndpoints returns the subset of endpoints that pass the
// passive health check for this request. A nil receiver disables filtering
// and returns the input unchanged. The rt parameter is currently unused.
//
// A single random value p is drawn per request and compared against each
// endpoint's drop probability, so drop decisions within one request are
// correlated by design. If every endpoint would be dropped, the full input
// slice is returned instead, so the request can still be served.
func (h *healthyEndpoints) filterHealthyEndpoints(endpoints []routing.LBEndpoint, rt *routing.Route) []routing.LBEndpoint {
	if h == nil {
		return endpoints
	}

	p := h.rnd.Float64()

	filtered := make([]routing.LBEndpoint, 0, len(endpoints))
	for _, e := range endpoints {
		// Keep the endpoint unless the draw falls below its PHC drop
		// probability. (Inverted from an empty "drop" branch for idiom.)
		if p >= e.Metrics.HealthCheckDropProbability() {
			filtered = append(filtered, e)
		}
	}

	// Never return an empty set: fall back to all endpoints rather than
	// leaving the request with nowhere to go.
	if len(filtered) == 0 {
		return endpoints
	}
	return filtered
}
130 changes: 124 additions & 6 deletions proxy/healthy_endpoints_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"time"

"github.com/stretchr/testify/assert"
"github.com/zalando/skipper/eskip"
"github.com/zalando/skipper/loadbalancer"
"github.com/zalando/skipper/routing"
)
Expand All @@ -20,7 +21,123 @@ const (
)

func defaultEndpointRegistry() *routing.EndpointRegistry {
return routing.NewEndpointRegistry(routing.RegistryOptions{})
return routing.NewEndpointRegistry(routing.RegistryOptions{
PassiveHealthCheckEnabled: true,
StatsResetPeriod: period,
MinRequests: 10,
MinFailed: 0.1,
RequestsDiscount: 1.0,
})
}

// initialize builds an endpoint registry, a single LB route over the given
// hosts, and a Proxy wired with passive-health-check endpoint filtering.
func initialize(hosts []string) (*routing.EndpointRegistry, *routing.Route, *Proxy) {
	registry := defaultEndpointRegistry()

	eskipRoute := eskip.Route{BackendType: eskip.LBBackend, LBAlgorithm: loadbalancer.PowerOfRandomNChoices.String()}
	for _, host := range hosts {
		eskipRoute.LBEndpoints = append(eskipRoute.LBEndpoints, "http://"+host)
	}

	route := &routing.Route{
		Route:       eskipRoute,
		LBEndpoints: []routing.LBEndpoint{},
	}
	route = loadbalancer.NewAlgorithmProvider().Do([]*routing.Route{route})[0]
	registry.Do([]*routing.Route{route})

	proxy := &Proxy{
		registry: registry,
		heathlyEndpoints: &healthyEndpoints{
			rnd:              rand.New(loadbalancer.NewLockedSource()),
			endpointRegistry: registry,
		},
	}

	return registry, route, proxy
}

func TestFilterHealthyEndpointsReturnsAllEndpointsWhenAllAreHealthy(t *testing.T) {
	hosts := []string{"host1.test:80", "host2.test:80", "host3.test:80"}
	endpointRegistry, route, proxy := initialize(hosts)

	// Record 50 successful requests for every endpoint, then roll over the
	// stats period so PHC evaluates them.
	for _, host := range hosts {
		metrics := endpointRegistry.GetMetrics(host)
		for j := 0; j < 50; j++ {
			metrics.IncTotalRequests()
		}
	}
	time.Sleep(period + time.Millisecond)
	endpointRegistry.Do([]*routing.Route{route})

	// Count how many endpoints survive filtering on each simulated request.
	nHosts := map[int]int{}
	for i := 0; i < nRequests; i++ {
		endpoints := proxy.heathlyEndpoints.filterHealthyEndpoints(route.LBEndpoints, route)
		nHosts[len(endpoints)]++
	}

	// All endpoints are healthy, so no request should ever see fewer than 3.
	assert.Equal(t, nRequests, nHosts[3])
	assert.Equal(t, 0, nHosts[2])
	assert.Equal(t, 0, nHosts[1])
}

func TestFilterHealthyEndpointsReturnsAllEndpointsWhenAllAreUnhealthy(t *testing.T) {
	hosts := []string{"host1.test:80", "host2.test:80", "host3.test:80"}
	endpointRegistry, route, proxy := initialize(hosts)

	// Every endpoint gets 50 requests with 30 failures, making them all
	// equally unhealthy; then roll over the stats period.
	for _, host := range hosts {
		metrics := endpointRegistry.GetMetrics(host)
		for j := 0; j < 50; j++ {
			metrics.IncTotalRequests()
		}
		for j := 0; j < 30; j++ {
			metrics.IncTotalFailedRoundTrips()
		}
	}
	time.Sleep(period + time.Millisecond)
	endpointRegistry.Do([]*routing.Route{route})

	nHosts := map[int]int{}
	for i := 0; i < nRequests; i++ {
		endpoints := proxy.heathlyEndpoints.filterHealthyEndpoints(route.LBEndpoints, route)
		nHosts[len(endpoints)]++
	}

	// When every endpoint would be dropped, the filter falls back to the
	// full set, so all requests must still see all 3 endpoints.
	assert.Equal(t, nRequests, nHosts[3])
	assert.Equal(t, 0, nHosts[2])
	assert.Equal(t, 0, nHosts[1])
}

func TestFilterHealthyEndpointsFiltersUnhealthyEndpoints(t *testing.T) {
	hosts := []string{"host1.test:80", "host2.test:80", "host3.test:80"}
	endpointRegistry, route, proxy := initialize(hosts)

	// All endpoints receive 50 requests...
	for _, host := range hosts {
		metrics := endpointRegistry.GetMetrics(host)
		for j := 0; j < 50; j++ {
			metrics.IncTotalRequests()
		}
	}
	// ...but only the first one records 30 failures.
	unhealthy := endpointRegistry.GetMetrics(hosts[0])
	for i := 0; i < 30; i++ {
		unhealthy.IncTotalFailedRoundTrips()
	}
	time.Sleep(period + time.Millisecond)
	endpointRegistry.Do([]*routing.Route{route})

	nHosts := map[int]int{}
	for i := 0; i < nRequests; i++ {
		endpoints := proxy.heathlyEndpoints.filterHealthyEndpoints(route.LBEndpoints, route)
		nHosts[len(endpoints)]++

		// Whenever one endpoint is dropped it must be the unhealthy first
		// host, leaving the two healthy hosts in order.
		if len(endpoints) == 2 {
			assert.Equal(t, hosts[1], endpoints[0].Host)
			assert.Equal(t, hosts[2], endpoints[1].Host)
		}
	}

	// Roughly 40% of requests keep all 3 endpoints and 60% drop the
	// unhealthy one; never more than one endpoint is dropped.
	assert.InDelta(t, 4*nRequests/10, nHosts[3], nRequests/10)
	assert.InDelta(t, 6*nRequests/10, nHosts[2], nRequests/10)
	assert.Equal(t, 0, nHosts[1])
}

func TestPHCForSingleHealthyEndpoint(t *testing.T) {
Expand All @@ -32,7 +149,8 @@ func TestPHCForSingleHealthyEndpoint(t *testing.T) {

doc := fmt.Sprintf(`* -> "%s"`, service.URL)
tp, err := newTestProxyWithParams(doc, Params{
EndpointRegistry: endpointRegistry,
EnablePassiveHealthCheck: true,
EndpointRegistry: endpointRegistry,
})
if err != nil {
t.Fatal(err)
Expand Down Expand Up @@ -91,7 +209,8 @@ func TestPHCForMultipleHealthyEndpoints(t *testing.T) {

doc := fmt.Sprintf(`* -> <random, "%s", "%s", "%s">`, services[0].URL, services[1].URL, services[2].URL)
tp, err := newTestProxyWithParams(doc, Params{
EndpointRegistry: endpointRegistry,
EnablePassiveHealthCheck: true,
EndpointRegistry: endpointRegistry,
})
if err != nil {
t.Fatal(err)
Expand Down Expand Up @@ -177,6 +296,7 @@ func TestPHCForMultipleHealthyAndOneUnhealthyEndpoints(t *testing.T) {

doc := fmt.Sprintf(`* -> <random, "%s", "%s", "%s">`, services[0].URL, services[1].URL, services[2].URL)
tp, err := newTestProxyWithParams(doc, Params{
EnablePassiveHealthCheck: true,
EndpointRegistry: endpointRegistry,
CustomHttpRoundTripperWrap: newRoundTripperUnhealthyHost(&RoundTripperUnhealthyHostParams{Host: services[0].URL[7:], Probability: rtFailureProbability}),
})
Expand Down Expand Up @@ -221,7 +341,5 @@ func TestPHCForMultipleHealthyAndOneUnhealthyEndpoints(t *testing.T) {
}
rsp.Body.Close()
}
assert.InDelta(t, 0.33*rtFailureProbability*nRequests, failedReqs, 0.05*nRequests)
// After PHC is implemented, I expect failed requests to decrease like this:
// assert.InDelta(t, 0.33*rtFailureProbability*(1.0-rtFailureProbability)*nRequests, failedReqs, 0.05*nRequests)
assert.InDelta(t, 0.33*rtFailureProbability*(1.0-rtFailureProbability)*nRequests, failedReqs, 0.05*nRequests)
}
Loading

0 comments on commit 19bbbe0

Please sign in to comment.