diff --git a/integration/integration_test_fixtures.py b/integration/integration_test_fixtures.py index 9b134f576..79f3bbf5f 100644 --- a/integration/integration_test_fixtures.py +++ b/integration/integration_test_fixtures.py @@ -80,6 +80,12 @@ def getNighthawkCounterMapFromJson(self, parsed_json): for counter in parsed_json["results"][0]["counters"] } + def getNighthawkGlobalHistogramsbyIdFromJson(self, parsed_json): + """ + Utility method to get the global histograms from the json indexed by id. + """ + return {statistic["id"]: statistic for statistic in parsed_json["results"][0]["statistics"]} + def getTestServerRootUri(self, https=False): """ Utility for getting the http://host:port/ that can be used to query the server we started in setUp() diff --git a/integration/test_integration_basics.py b/integration/test_integration_basics.py index 84d34902f..ad90ae9f9 100644 --- a/integration/test_integration_basics.py +++ b/integration/test_integration_basics.py @@ -10,11 +10,17 @@ IntegrationTestBase) # TODO(oschaaf): rewrite the tests so we can just hand a map of expected key values to it. +# TODO(oschaaf): we mostly verify stats observed from the client-side. Add expectations +# for the server side as well. class TestHttp(HttpIntegrationTestBase): def test_h1(self): + """ + Runs the CLI configured to use plain HTTP/1 against our test server, and sanity + checks statistics from both client and server. + """ parsed_json = self.runNighthawkClient([self.getTestServerRootUri()]) counters = self.getNighthawkCounterMapFromJson(parsed_json) self.assertEqual(counters["benchmark.http_2xx"], 25) @@ -29,7 +35,56 @@ def test_h1(self): self.assertEqual(counters["upstream_rq_total"], 25) self.assertEqual(len(counters), 9) + def mini_stress_test_h1(self, args): + # run a test with more rps then we can handle, and a very small client-side queue. 
+ # we should observe both lots of successful requests as well as time spent in blocking mode. + parsed_json = self.runNighthawkClient(args) + counters = self.getNighthawkCounterMapFromJson(parsed_json) + # We set a reasonably low expectation of 1000 requests. We set it low, because we want this + # test to succeed on a reasonable share of setups (hopefully practically all). + MIN_EXPECTED_REQUESTS = 1000 + self.assertGreater(counters["benchmark.http_2xx"], MIN_EXPECTED_REQUESTS) + self.assertEqual(counters["upstream_cx_http1_total"], 1) + global_histograms = self.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) + self.assertGreater(int(global_histograms["sequencer.blocking"]["count"]), MIN_EXPECTED_REQUESTS) + self.assertGreater( + int(global_histograms["benchmark_http_client.request_to_response"]["count"]), + MIN_EXPECTED_REQUESTS) + return counters + + def test_h1_mini_stress_test_with_client_side_queueing(self): + """ + Run a max rps test with the h1 pool against our test server, using a small client-side + queue. We expect to observe: + - upstream_rq_pending_total increasing + - upstream_cx_overflow overflows + - blocking to be reported by the sequencer + """ + counters = self.mini_stress_test_h1([ + self.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests", "10", + "--duration 2" + ]) + self.assertGreater(counters["upstream_rq_pending_total"], 100) + self.assertGreater(counters["upstream_cx_overflow"], 0) + + def test_h1_mini_stress_test_without_client_side_queueing(self): + """ + Run a max rps test with the h1 pool against our test server, with no client-side + queueing. 
We expect to observe: + - upstream_rq_pending_total to be equal to 1 + - blocking to be reported by the sequencer + - no upstream_cx_overflows + """ + counters = self.mini_stress_test_h1( + [self.getTestServerRootUri(), "--rps", "999999", "--duration 2"]) + self.assertEqual(counters["upstream_rq_pending_total"], 1) + self.assertFalse("upstream_cx_overflow" in counters) + def test_h2(self): + """ + Runs the CLI configured to use h2c against our test server, and sanity + checks statistics from both client and server. + """ parsed_json = self.runNighthawkClient(["--h2", self.getTestServerRootUri()]) counters = self.getNighthawkCounterMapFromJson(parsed_json) self.assertEqual(counters["benchmark.http_2xx"], 25) @@ -47,6 +102,10 @@ def test_h2(self): class TestHttps(HttpsIntegrationTestBase): def test_h1(self): + """ + Runs the CLI configured to use HTTP/1 over https against our test server, and sanity + checks statistics from both client and server. + """ parsed_json = self.runNighthawkClient([self.getTestServerRootUri()]) counters = self.getNighthawkCounterMapFromJson(parsed_json) self.assertEqual(counters["benchmark.http_2xx"], 25) @@ -71,6 +130,11 @@ def test_h1(self): self.getServerStatFromJson(server_stats, "http.ingress_http.downstream_rq_2xx"), 25) def test_h2(self): + """ + Runs the CLI configured to use HTTP/2 (using https) against our test server, and sanity + checks statistics from both client and server. 
+ """ + parsed_json = self.runNighthawkClient(["--h2", self.getTestServerRootUri()]) counters = self.getNighthawkCounterMapFromJson(parsed_json) self.assertEqual(counters["benchmark.http_2xx"], 25) @@ -91,6 +155,10 @@ def test_h2(self): def test_h1_tls_context_configuration(self): + """ + Verifies specifying tls cipher suites works with the h1 pool + """ + parsed_json = self.runNighthawkClient([ "--duration 1", "--tls-context {common_tls_context:{tls_params:{cipher_suites:[\"-ALL:ECDHE-RSA-AES128-SHA\"]}}}", @@ -109,6 +177,9 @@ def test_h1_tls_context_configuration(self): def test_h2_tls_context_configuration(self): + """ + Verifies specifying tls cipher suites works with the h2 pool + """ parsed_json = self.runNighthawkClient([ "--duration 1", "--tls-context {common_tls_context:{tls_params:{cipher_suites:[\"-ALL:ECDHE-RSA-AES128-SHA\"]}}}", diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 6bafa5e9f..1de45bbea 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -189,16 +189,19 @@ void BenchmarkClientHttpImpl::setRequestHeader(absl::string_view key, absl::stri } bool BenchmarkClientHttpImpl::tryStartOne(std::function caller_completion_callback) { - // When no client side queueing is specified (via max_pending_requests_), we are in closed loop - // mode. In closed loop mode we want to be able to control the pacing as exactly as possible. In - // open-loop mode we probably want to skip this. NOTE(oschaaf): We can't consistently rely on - // resourceManager()::requests() because that isn't used for h/1 (it is used in tcp and h2 - // though). - if (max_pending_requests_ == 1 && - (!cluster_->resourceManager(Envoy::Upstream::ResourcePriority::Default) - .pendingRequests() - .canCreate() || - ((requests_initiated_ - requests_completed_) >= connection_limit_))) { + // When we allow client-side queuing, we want to have a sense of time spend waiting on that queue. 
+ // So we return false here to indicate we couldn't initiate a new request. + if (!cluster_->resourceManager(Envoy::Upstream::ResourcePriority::Default) + .pendingRequests() + .canCreate()) { + return false; + } + // When client side queueing is disabled (max_pending equals 1) we control the pacing as + // exactly as possible here. + // NOTE: We can't consistently rely on resourceManager()::requests() + // because that isn't used for h/1 (it is used in tcp and h2 though). + if ((max_pending_requests_ == 1 && + (requests_initiated_ - requests_completed_) >= connection_limit_)) { + return false; + }