diff --git a/CODEOWNERS b/CODEOWNERS index 925cbb03c685..2e0248d2f415 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -303,6 +303,9 @@ extensions/filters/http/oauth2 @derekargueta @snowp /*/extensions/path/uri_template_lib @alyssawilk @yanjunxiang-google /*/extensions/path/uri_template_lib/proto @alyssawilk @yanjunxiang-google +# cached health checker extension +/*/extensions/health_checkers/cached @hudayou @mainx07 @mattklein123 + # mobile /mobile/ @jpsim @Augustyniak @RyanTheOptimist @alyssawilk @abeyad diff --git a/api/BUILD b/api/BUILD index f51f62cebc1a..45a2ac27a7b5 100644 --- a/api/BUILD +++ b/api/BUILD @@ -228,6 +228,7 @@ proto_library( "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", + "//envoy/extensions/health_checkers/cached/v3:pkg", "//envoy/extensions/health_checkers/redis/v3:pkg", "//envoy/extensions/health_checkers/thrift/v3:pkg", "//envoy/extensions/http/cache/file_system_http_cache/v3:pkg", diff --git a/api/envoy/data/core/v3/health_check_event.proto b/api/envoy/data/core/v3/health_check_event.proto index a349fa31bc08..610f4e1b07f8 100644 --- a/api/envoy/data/core/v3/health_check_event.proto +++ b/api/envoy/data/core/v3/health_check_event.proto @@ -32,6 +32,7 @@ enum HealthCheckerType { GRPC = 2; REDIS = 3; THRIFT = 4; + CACHED = 5; } // [#next-free-field: 10] diff --git a/api/envoy/extensions/health_checkers/cached/v3/BUILD b/api/envoy/extensions/health_checkers/cached/v3/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/health_checkers/cached/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/health_checkers/cached/v3/cached.proto b/api/envoy/extensions/health_checkers/cached/v3/cached.proto new file mode 100644 index 000000000000..06f9e6279843 --- /dev/null +++ b/api/envoy/extensions/health_checkers/cached/v3/cached.proto @@ -0,0 +1,73 @@ +syntax = "proto3"; + +package envoy.extensions.health_checkers.cached.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.health_checkers.cached.v3"; +option java_outer_classname = "CachedProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/health_checkers/cached/v3;cachedv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Cached] +// Cached health checker :ref:`configuration overview `. +// [#extension: envoy.health_checkers.cached] + +// [#next-free-field: 9] +message Cached { + // [#next-free-field: 7] + message TlsOptions { + // whether tls is enabled for the cache server. + bool enabled = 1; + + // cacert is an optional name of a CA certificate/bundle file to load + // and use for validation of the cache server. + string cacert = 2; + + // capath is an optional directory path where trusted CA certificate files are + // stored in an OpenSSL-compatible structure. + string capath = 3; + + // cert and key are optional names of a client side + // certificate and private key files to use for authentication. They need to + // be both specified or omitted. 
+ string cert = 4; + + // cert and key are optional names of a client side + // certificate and private key files to use for authentication. They need to + // be both specified or omitted. + string key = 5; + + // sni is an optional and will be used as a server name indication + // (SNI) TLS extension. + string sni = 6; + } + + // hostname of the cache server. + string host = 1; + + // port number of the cache server. + uint32 port = 2 [(validate.rules).uint32 = {lte: 65535}]; + + // username used to authenticate with the cache server. + string user = 3; + + // password used to authenticate with the cache server. + string password = 4; + + // database number of the cache server. + uint32 db = 5 [(validate.rules).uint32 = {lt: 2147483647}]; + + // connect timeout of the cache server. + google.protobuf.Duration connect_timeout = 6; + + // command timeout of the cache server. + google.protobuf.Duration command_timeout = 7; + + // tls options of the cache server. + TlsOptions tls_options = 8; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index aa51c2321281..924ea2515cbc 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -166,6 +166,7 @@ proto_library( "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", + "//envoy/extensions/health_checkers/cached/v3:pkg", "//envoy/extensions/health_checkers/redis/v3:pkg", "//envoy/extensions/health_checkers/thrift/v3:pkg", "//envoy/extensions/http/cache/file_system_http_cache/v3:pkg", diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 1aac78152bd4..a394031e415f 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -310,6 +310,33 @@ envoy_cmake( ], ) +envoy_cmake( + name = "hiredis", + cache_entries = { + "ENABLE_SSL": "on", + "DISABLE_TESTS": "on", + }, + env = { + "CXXFLAGS": "-fPIC", + "CFLAGS": "-fPIC", + }, + lib_source = "@com_github_hiredis//:all", + out_static_libs = select({ + "//bazel:windows_x86_64": [ + "hiredis_static.lib", + "hiredis_ssl_static.lib", + ], + "//conditions:default": [ + "libhiredis.a", + "libhiredis_ssl.a", + ], + }), + deps = [ + "//external:crypto", + "//external:ssl", + ], +) + envoy_cmake( name = "event", cache_entries = { diff --git a/bazel/foreign_cc/hiredis.patch b/bazel/foreign_cc/hiredis.patch new file mode 100644 index 000000000000..e9fd2b3f4651 --- /dev/null +++ b/bazel/foreign_cc/hiredis.patch @@ -0,0 +1,103 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 3d52d0c..e03b5d4 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -44,9 +44,7 @@ IF(WIN32) + ADD_DEFINITIONS(-D_CRT_SECURE_NO_WARNINGS -DWIN32_LEAN_AND_MEAN) + ENDIF() + +-ADD_LIBRARY(hiredis SHARED ${hiredis_sources}) + ADD_LIBRARY(hiredis_static STATIC ${hiredis_sources}) +-ADD_LIBRARY(hiredis::hiredis ALIAS hiredis) + ADD_LIBRARY(hiredis::hiredis_static ALIAS hiredis_static) + + IF(NOT MSVC) +@@ -54,25 +52,18 @@ IF(NOT MSVC) + PROPERTIES OUTPUT_NAME hiredis) + ENDIF() + +-SET_TARGET_PROPERTIES(hiredis +- PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE +- VERSION "${HIREDIS_SONAME}") + IF(MSVC) + SET_TARGET_PROPERTIES(hiredis_static + PROPERTIES COMPILE_FLAGS /Z7) + ENDIF() + IF(WIN32 OR MINGW) +- TARGET_LINK_LIBRARIES(hiredis PUBLIC ws2_32 crypt32) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC ws2_32 crypt32) + ELSEIF(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") +- TARGET_LINK_LIBRARIES(hiredis PUBLIC m) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC m) + ELSEIF(CMAKE_SYSTEM_NAME MATCHES "SunOS") +- 
TARGET_LINK_LIBRARIES(hiredis PUBLIC socket) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC socket) + ENDIF() + +-TARGET_INCLUDE_DIRECTORIES(hiredis PUBLIC $ $) + TARGET_INCLUDE_DIRECTORIES(hiredis_static PUBLIC $ $) + + CONFIGURE_FILE(hiredis.pc.in hiredis.pc @ONLY) +@@ -103,7 +94,7 @@ set(CPACK_RPM_PACKAGE_AUTOREQPROV ON) + + include(CPack) + +-INSTALL(TARGETS hiredis hiredis_static ++INSTALL(TARGETS hiredis_static + EXPORT hiredis-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} +@@ -161,8 +152,6 @@ IF(ENABLE_SSL) + FIND_PACKAGE(OpenSSL REQUIRED) + SET(hiredis_ssl_sources + ssl.c) +- ADD_LIBRARY(hiredis_ssl SHARED +- ${hiredis_ssl_sources}) + ADD_LIBRARY(hiredis_ssl_static STATIC + ${hiredis_ssl_sources}) + IF(NOT MSVC) +@@ -174,26 +163,19 @@ IF(ENABLE_SSL) + SET_PROPERTY(TARGET hiredis_ssl PROPERTY LINK_FLAGS "-Wl,-undefined -Wl,dynamic_lookup") + ENDIF() + +- SET_TARGET_PROPERTIES(hiredis_ssl +- PROPERTIES +- WINDOWS_EXPORT_ALL_SYMBOLS TRUE +- VERSION "${HIREDIS_SONAME}") + IF(MSVC) + SET_TARGET_PROPERTIES(hiredis_ssl_static + PROPERTIES COMPILE_FLAGS /Z7) + ENDIF() + +- TARGET_INCLUDE_DIRECTORIES(hiredis_ssl PRIVATE "${OPENSSL_INCLUDE_DIR}") + TARGET_INCLUDE_DIRECTORIES(hiredis_ssl_static PRIVATE "${OPENSSL_INCLUDE_DIR}") + +- TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE ${OPENSSL_LIBRARIES}) + IF (WIN32 OR MINGW) +- TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE hiredis) + TARGET_LINK_LIBRARIES(hiredis_ssl_static PUBLIC hiredis_static) + ENDIF() + CONFIGURE_FILE(hiredis_ssl.pc.in hiredis_ssl.pc @ONLY) + +- INSTALL(TARGETS hiredis_ssl hiredis_ssl_static ++ INSTALL(TARGETS hiredis_ssl_static + EXPORT hiredis_ssl-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} +diff --git a/ssl.c b/ssl.c +index 7d7ff66..491ce5a 100644 +--- a/ssl.c ++++ b/ssl.c +@@ -40,6 +40,12 @@ + #ifdef _WIN32 + #include + #include ++#undef X509_NAME ++#undef X509_EXTENSIONS ++#undef PKCS7_ISSUER_AND_SERIAL ++#undef PKCS7_SIGNER_INFO ++#undef OCSP_REQUEST ++#undef OCSP_RESPONSE + #else + #include + #endif diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index bd8c7cc3a3b5..79261290267b 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -194,6 +194,7 @@ def envoy_dependencies(skip_targets = []): _com_google_protobuf() _io_opencensus_cpp() _com_github_curl() + _com_github_hiredis() _com_github_envoyproxy_sqlparser() _v8() _com_googlesource_chromium_base_trace_event_common() @@ -862,6 +863,21 @@ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy/ actual = "@envoy//bazel/foreign_cc:curl", ) +def _com_github_hiredis(): + external_http_archive( + name = "com_github_hiredis", + build_file_content = BUILD_ALL_CONTENT + """ +cc_library(name = "hiredis", visibility = ["//visibility:public"], deps = ["@envoy//bazel/foreign_cc:hiredis"]) +""", + # To fix wincrypt symbols conflict + patches = ["@envoy//bazel/foreign_cc:hiredis.patch"], + patch_args = ["-p1"], + ) + native.bind( + name = "hiredis", + actual = "@envoy//bazel/foreign_cc:hiredis", + ) + def _v8(): external_http_archive( name = "v8", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index fd9d5790f089..cbf286be91a4 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1047,6 +1047,23 @@ REPOSITORY_LOCATIONS_SPEC = dict( license = "curl", license_url = "https://github.com/curl/curl/blob/curl-{underscore_version}/COPYING", ), + com_github_hiredis = dict( + 
project_name = "hiredis",
+        project_desc = "Minimalistic C client for Redis >= 1.2",
+        project_url = "https://github.com/redis/hiredis",
+        version = "1.1.0",
+        sha256 = "fe6d21741ec7f3fc9df409d921f47dfc73a4d8ff64f4ac6f1d95f951bf7f53d6",
+        strip_prefix = "hiredis-{version}",
+        urls = ["https://github.com/redis/hiredis/archive/refs/tags/v{version}.tar.gz"],
+        use_category = ["dataplane_ext"],
+        extensions = [
+            "envoy.health_checkers.cached",
+        ],
+        release_date = "2022-11-16",
+        cpe = "N/A",
+        license = "BSD-3-Clause",
+        license_url = "https://github.com/redis/hiredis/blob/v{version}/COPYING",
+    ),
     v8 = dict(
         project_name = "V8",
         project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++",
diff --git a/changelogs/current.yaml b/changelogs/current.yaml
index 40abfa27ea6c..67dca0940729 100644
--- a/changelogs/current.yaml
+++ b/changelogs/current.yaml
@@ -230,6 +230,9 @@ new_features:
 - area: health_check
   change: |
     added an optional bool flag :ref:`disable_active_health_check ` to disable the active health check for the endpoint.
+- area: health check
+  change: |
+    added :ref:`cached health check ` as a :ref:`custom health check `.
 - area: mobile
   change: |
     started merging the Envoy mobile library into the main Envoy repo.
diff --git a/docs/root/configuration/upstream/health_checkers/cached.rst b/docs/root/configuration/upstream/health_checkers/cached.rst
new file mode 100644
index 000000000000..5e7f00b44455
--- /dev/null
+++ b/docs/root/configuration/upstream/health_checkers/cached.rst
@@ -0,0 +1,31 @@
+.. _config_health_checkers_cached:
+
+Cached Health Checker
+=====================
+
+The Cached Health Checker (with :code:`envoy.health_checkers.cached` as its name) subscribes to a Redis cache for upstream host health check notifications.
+Once a health check result is set in the Redis cache by a third-party checker, a keyspace set event is received from the Redis cache; the checker then gets
+the health check result and stores it in an in-memory cache.
+
+
+An example of a :ref:`custom_health_check `
+using the Cached health checker is shown below:
+
+
+.. code-block:: yaml
+
+    custom_health_check:
+      name: envoy.health_checkers.cached
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.health_checkers.cached.v3.Cached
+        host: localhost
+        port: 6400
+        password: foobared
+        db: 100
+        tls_options:
+          enabled: true
+          cacert: /etc/redis/ca.crt
+          cert: /etc/redis/client.crt
+          key: /etc/redis/client.key
+
+* :ref:`v3 API reference `
diff --git a/docs/root/configuration/upstream/health_checkers/health_checkers.rst b/docs/root/configuration/upstream/health_checkers/health_checkers.rst
index 61b50009af14..cbd074408822 100644
--- a/docs/root/configuration/upstream/health_checkers/health_checkers.rst
+++ b/docs/root/configuration/upstream/health_checkers/health_checkers.rst
@@ -6,5 +6,6 @@ Health checkers
 .. toctree::
   :maxdepth: 2
 
+  cached
   redis
   thrift
diff --git a/docs/root/intro/arch_overview/upstream/health_checking.rst b/docs/root/intro/arch_overview/upstream/health_checking.rst
index 74777ef8e057..c2fefe64ba10 100644
--- a/docs/root/intro/arch_overview/upstream/health_checking.rst
+++ b/docs/root/intro/arch_overview/upstream/health_checking.rst
@@ -19,6 +19,9 @@ unhealthy, successes required before marking a host healthy, etc.):
 * **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the upstream
   host. It expects the byte buffer to be echoed in the response if the host is to be considered healthy.
Envoy also supports connect only L3/L4 health checking. +* **Cached**: With cached health checking, Envoy will subscribe to a Redis cache and get the cached + health check result once receiving the keyspace key set events from it. See + :ref:`cached `. * **Redis**: Envoy will send a Redis PING command and expect a PONG response. The upstream Redis server can respond with anything other than PONG to cause an immediate active health check failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist diff --git a/envoy/server/health_checker_config.h b/envoy/server/health_checker_config.h index 82f27123db8c..72ca4fe59bd3 100644 --- a/envoy/server/health_checker_config.h +++ b/envoy/server/health_checker_config.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/typed_config.h" #include "envoy/runtime/runtime.h" +#include "envoy/singleton/manager.h" #include "envoy/upstream/health_checker.h" namespace Envoy { @@ -46,6 +47,11 @@ class HealthCheckerFactoryContext { * @return Api::Api& the API used by the server. */ virtual Api::Api& api() PURE; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; }; /** diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index f76ec5fbe836..bc796dd92ddf 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -123,7 +123,7 @@ ClusterFactoryImplBase::create(Server::Configuration::ServerFactoryContext& serv new_cluster_pair.first->setHealthChecker(HealthCheckerFactory::create( cluster.health_checks()[0], *new_cluster_pair.first, context.runtime(), context.mainThreadDispatcher(), context.logManager(), context.messageValidationVisitor(), - context.api())); + context.api(), context.singletonManager())); } } diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 1cbe74607c6c..96ce3c14a098 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -74,10 +74,10 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec Event::Dispatcher& dispatcher, HealthCheckEventLoggerPtr&& event_logger, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api) + Api::Api& api, Singleton::Manager& singleton_manager) : cluster_(cluster), runtime_(runtime), dispatcher_(dispatcher), - event_logger_(std::move(event_logger)), validation_visitor_(validation_visitor), api_(api) { - } + event_logger_(std::move(event_logger)), validation_visitor_(validation_visitor), api_(api), + singleton_manager_(singleton_manager) {} Upstream::Cluster& cluster() override { return cluster_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } @@ -86,6 +86,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec return validation_visitor_; } Api::Api& api() override { return api_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } private: Upstream::Cluster& cluster_; @@ -94,13 +95,16 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec HealthCheckEventLoggerPtr event_logger_; ProtobufMessage::ValidationVisitor& validation_visitor_; Api::Api& api_; + Singleton::Manager& singleton_manager_; }; 
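The factory context above now carries a reference to the server-wide Singleton::Manager precisely so that the cached checker can share a single Redis client across every cluster and host that uses it. A hedged sketch of the registration-and-lookup pattern, mirroring what client_impl.cc later in this diff does (ClientImpl, ClientFactoryImpl, and ClientPtr are the names used there):

.. code-block:: cpp

  #include "envoy/singleton/manager.h"

  // Registers a process-wide unique name for the singleton.
  SINGLETON_MANAGER_REGISTRATION(cached_health_check_client);

  ClientPtr ClientFactoryImpl::create(Singleton::Manager& singleton_manager,
                                      ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher,
                                      Random::RandomGenerator& random) {
    // Returns the existing client if one was already created; otherwise the lambda builds it once.
    return singleton_manager.getTyped<ClientImpl>(
        SINGLETON_MANAGER_REGISTERED_NAME(cached_health_check_client),
        [&opts, &dispatcher, &random] {
          return std::make_shared<ClientImpl>(opts, dispatcher, random);
        });
  }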
-HealthCheckerSharedPtr HealthCheckerFactory::create( - const envoy::config::core::v3::HealthCheck& health_check_config, Upstream::Cluster& cluster, - Runtime::Loader& runtime, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) { +HealthCheckerSharedPtr +HealthCheckerFactory::create(const envoy::config::core::v3::HealthCheck& health_check_config, + Upstream::Cluster& cluster, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + Singleton::Manager& singleton_manager) { HealthCheckEventLoggerPtr event_logger; if (!health_check_config.event_log_path().empty()) { event_logger = std::make_unique( @@ -130,7 +134,7 @@ HealthCheckerSharedPtr HealthCheckerFactory::create( health_check_config.custom_health_check()); std::unique_ptr context( new HealthCheckerFactoryContextImpl(cluster, runtime, dispatcher, std::move(event_logger), - validation_visitor, api)); + validation_visitor, api, singleton_manager)); return factory.createCustomHealthChecker(health_check_config, *context); } } diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 5728df9f6832..29d2e8ff90d0 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -47,7 +47,8 @@ class HealthCheckerFactory : public Logger::Loggable create(const envoy::config::core::v3::HealthCheck& health_check_config, Upstream::Cluster& cluster, Runtime::Loader& runtime, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + Singleton::Manager& singleton_manager); }; /** diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 7a8386069184..7854f90ea3fc 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -423,7 +423,7 @@ void HdsCluster::updateHealthchecks( auto new_health_checker = Upstream::HealthCheckerFactory::create( health_check, *this, server_context_.runtime(), server_context_.mainThreadDispatcher(), server_context_.accessLogManager(), server_context_.messageValidationVisitor(), - server_context_.api()); + server_context_.api(), server_context_.singletonManager()); health_checkers_map.insert({health_check, new_health_checker}); health_checkers.push_back(new_health_checker); @@ -536,7 +536,7 @@ void HdsCluster::initHealthchecks() { auto health_checker = Upstream::HealthCheckerFactory::create( health_check, *this, server_context_.runtime(), server_context_.mainThreadDispatcher(), server_context_.accessLogManager(), server_context_.messageValidationVisitor(), - server_context_.api()); + server_context_.api(), server_context_.singletonManager()); health_checkers_.push_back(health_checker); health_checkers_map_.insert({health_check, health_checker}); diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 4e094dcfc15d..d8f81dcbfee2 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -60,6 +60,7 @@ EXTENSIONS = { # Health checkers # + "envoy.health_checkers.cached": "//source/extensions/health_checkers/cached:config", 
"envoy.health_checkers.redis": "//source/extensions/health_checkers/redis:config", "envoy.health_checkers.thrift": "//source/extensions/health_checkers/thrift:config", diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index 1c6093744c4c..b3721a7b775e 100644 --- a/source/extensions/extensions_metadata.yaml +++ b/source/extensions/extensions_metadata.yaml @@ -736,6 +736,13 @@ envoy.health_checkers.thrift: status: alpha type_urls: - envoy.extensions.health_checkers.thrift.v3.Thrift +envoy.health_checkers.cached: + categories: + - envoy.health_checkers + security_posture: requires_trusted_downstream_and_upstream + status: alpha + type_urls: + - envoy.extensions.health_checkers.cached.v3.Cached envoy.http.original_ip_detection.custom_header: categories: - envoy.http.original_ip_detection diff --git a/source/extensions/health_checkers/cached/BUILD b/source/extensions/health_checkers/cached/BUILD new file mode 100644 index 000000000000..d689311184d0 --- /dev/null +++ b/source/extensions/health_checkers/cached/BUILD @@ -0,0 +1,86 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# Cached custom health checker. + +envoy_extension_package() + +envoy_cc_library( + name = "client_interface", + hdrs = ["client.h"], + deps = [ + "//envoy/event:dispatcher_interface", + "//envoy/singleton:manager_interface", + ], +) + +envoy_cc_library( + name = "hiredis", + srcs = ["hiredis.cc"], + hdrs = ["hiredis.h"], + external_deps = ["hiredis"], + deps = [ + ":client_interface", + "//source/common/common:assert_lib", + "//source/common/common:backoff_lib", + "//source/common/event:dispatcher_lib", + "//source/common/json:json_loader_lib", + ], +) + +envoy_cc_library( + name = "client_lib", + srcs = ["client_impl.cc"], + hdrs = ["client_impl.h"], + deps = [ + ":client_interface", + ":hiredis", + ], +) + +envoy_cc_library( + name = "cached", + srcs = ["cached.cc"], + hdrs = ["cached.h"], + deps = [ + ":client_interface", + "//source/common/upstream:health_checker_base_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/data/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/health_checkers/cached/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":cached", + ":client_lib", + ":utility", + "//envoy/registry", + "//envoy/server:health_checker_config_interface", + "//source/common/common:assert_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/health_checkers/cached/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "utility", + hdrs = ["utility.h"], + deps = [ + "//source/common/config:utility_lib", + "//source/common/protobuf", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/health_checkers/cached/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/health_checkers/cached/cached.cc b/source/extensions/health_checkers/cached/cached.cc new file mode 100644 index 000000000000..ec24213e74d4 --- /dev/null +++ b/source/extensions/health_checkers/cached/cached.cc @@ -0,0 +1,105 @@ +#include "source/extensions/health_checkers/cached/cached.h" + +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/data/core/v3/health_check_event.pb.h" +#include "envoy/extensions/health_checkers/cached/v3/cached.pb.h" + +namespace Envoy { 
+namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +// Helper functions to get the correct hostname for a cached health check. +const std::string& getHostname(const Upstream::HostSharedPtr& host) { + return host->hostnameForHealthChecks().empty() ? host->hostname() + : host->hostnameForHealthChecks(); +} + +ConnectionOptionsPtr +getConnectionOptions(const envoy::extensions::health_checkers::cached::v3::Cached& config) { + ConnectionOptions opts; + opts.host = config.host(); + opts.port = config.port(); + if (!config.user().empty()) + opts.user = config.user(); + opts.password = config.password(); + opts.db = config.db(); + opts.connect_timeout = + std::chrono::milliseconds(DurationUtil::durationToMilliseconds(config.connect_timeout())); + opts.command_timeout = + std::chrono::milliseconds(DurationUtil::durationToMilliseconds(config.command_timeout())); + if (config.has_tls_options()) { + opts.tls.enabled = config.tls_options().enabled(); + opts.tls.cacert = config.tls_options().cacert(); + opts.tls.capath = config.tls_options().capath(); + opts.tls.cert = config.tls_options().cert(); + opts.tls.key = config.tls_options().key(); + opts.tls.sni = config.tls_options().sni(); + } + + return std::make_shared(opts); +} + +CachedHealthChecker::CachedHealthChecker( + const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, + const envoy::extensions::health_checkers::cached::v3::Cached& cached_config, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, + Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, + Singleton::Manager& singleton_manager, ClientFactory& client_factory) + : HealthCheckerImplBase(cluster, config, dispatcher, runtime, api.randomGenerator(), + std::move(event_logger)), + dispatcher_(dispatcher), + client_(client_factory.create(singleton_manager, getConnectionOptions(cached_config), + dispatcher, api.randomGenerator())) {} + +CachedHealthChecker::CachedActiveHealthCheckSession::CachedActiveHealthCheckSession( + CachedHealthChecker& parent, const Upstream::HostSharedPtr& host) + : ActiveHealthCheckSession(parent, host), parent_(parent), hostname_(getHostname(host)) { + ENVOY_LOG(trace, "CachedActiveHealthCheckSession construct hostname={}", hostname_); + delayed_interval_timer_ = + parent_.dispatcher_.createTimer([this]() -> void { onDelayedIntervalTimeout(); }); + parent_.client_->start(hostname_); +} + +CachedHealthChecker::CachedActiveHealthCheckSession::~CachedActiveHealthCheckSession() { + ENVOY_LOG(trace, "CachedActiveHealthCheckSession destruct"); + if (delayed_interval_timer_ && delayed_interval_timer_->enabled()) { + delayed_interval_timer_->disableTimer(); + } + parent_.client_->close(hostname_); +} + +void CachedHealthChecker::CachedActiveHealthCheckSession::onDeferredDelete() { + ENVOY_LOG(trace, "CachedActiveHealthCheckSession onDeferredDelete"); + if (delayed_interval_timer_ && delayed_interval_timer_->enabled()) { + delayed_interval_timer_->disableTimer(); + } + parent_.client_->close(hostname_); +} + +void CachedHealthChecker::CachedActiveHealthCheckSession::onInterval() { + ENVOY_LOG(trace, "CachedActiveHealthCheckSession onInterval"); + delayed_interval_timer_->enableTimer(std::chrono::milliseconds(DELAY_INTERVAL_MS)); +} + +void CachedHealthChecker::CachedActiveHealthCheckSession::onDelayedIntervalTimeout() { + delayed_interval_timer_->disableTimer(); + bool is_healthy = parent_.client_->sendRequest(hostname_); + ENVOY_LOG(trace, + "CachedActiveHealthCheckSession 
onDelayedIntervalTimeout. hostname: {}, is_healthy: {}", + hostname_, is_healthy); + if (is_healthy) { + handleSuccess(); + } else { + handleFailure(envoy::data::core::v3::ACTIVE); + } +} + +void CachedHealthChecker::CachedActiveHealthCheckSession::onTimeout() { + ENVOY_LOG(trace, "CachedActiveHealthCheckSession onTimeout"); +} + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/cached.h b/source/extensions/health_checkers/cached/cached.h new file mode 100644 index 000000000000..7487f7a4aa97 --- /dev/null +++ b/source/extensions/health_checkers/cached/cached.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +#include "envoy/api/api.h" +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/data/core/v3/health_check_event.pb.h" +#include "envoy/extensions/health_checkers/cached/v3/cached.pb.h" + +#include "source/common/config/utility.h" +#include "source/common/upstream/health_checker_base_impl.h" +#include "source/extensions/health_checkers/cached/client.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +/** + * Cached health checker implementation. + */ +class CachedHealthChecker : public Upstream::HealthCheckerImplBase { +public: + CachedHealthChecker(const Upstream::Cluster& cluster, + const envoy::config::core::v3::HealthCheck& config, + const envoy::extensions::health_checkers::cached::v3::Cached& cached_config, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, + Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, + Singleton::Manager& singleton_manager, ClientFactory& client_factory); + +protected: + envoy::data::core::v3::HealthCheckerType healthCheckerType() const override { + return envoy::data::core::v3::CACHED; + } + +private: + friend class CachedHealthCheckerTest; + + class CachedActiveHealthCheckSession : public ActiveHealthCheckSession { + public: + CachedActiveHealthCheckSession(CachedHealthChecker& parent, + const Upstream::HostSharedPtr& host); + ~CachedActiveHealthCheckSession() override; + + // ActiveHealthCheckSession + void onInterval() override; + void onTimeout() override; + void onDeferredDelete() final; + + const uint32_t DELAY_INTERVAL_MS = 999; + + private: + void onDelayedIntervalTimeout(); + + CachedHealthChecker& parent_; + const std::string& hostname_; + Event::TimerPtr delayed_interval_timer_; + }; + + using CachedActiveHealthCheckSessionPtr = std::unique_ptr; + + // HealthCheckerImplBase + ActiveHealthCheckSessionPtr makeSession(Upstream::HostSharedPtr host) override { + return std::make_unique(*this, host); + } + + Event::Dispatcher& dispatcher_; + ClientPtr client_; +}; + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/client.h b/source/extensions/health_checkers/cached/client.h new file mode 100644 index 000000000000..22458345a238 --- /dev/null +++ b/source/extensions/health_checkers/cached/client.h @@ -0,0 +1,128 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/singleton/manager.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +/** + * A singleton cached client connection. + */ +class Client { +public: + virtual ~Client() = default; + + /** + * Initialize the connection. 
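+   * The hostname is used as the cache key under which this host's health status is tracked.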
+ */ + virtual void start(const std::string& hostname) PURE; + + /** + * Send the health check request. + */ + virtual bool sendRequest(const std::string& hostname) PURE; + + /** + * Close the underlying network connection. + */ + virtual void close(const std::string& hostname) PURE; +}; + +using ClientPtr = std::shared_ptr; + +struct TlsOptions { + bool enabled = false; + + std::string cacert; + + std::string capath; + + std::string cert; + + std::string key; + + std::string sni; +}; + +struct ConnectionOptions { + std::string host; + + int port = 6379; + + std::string user = "default"; + + std::string password; + + uint32_t db = 0; + + std::chrono::milliseconds connect_timeout{0}; + + std::chrono::milliseconds command_timeout{0}; + + TlsOptions tls; +}; + +using ConnectionOptionsPtr = std::shared_ptr; + +/** + * A factory for the singleton cached client connection. + */ +class ClientFactory { +public: + virtual ~ClientFactory() = default; + + /** + * Create a singleton cached client connection. + * @param the server-wide singleton manager. + * @param the connection options to underlying network connection. + * @param the main thread's dispatcher. + * @param the random number generator. + * for all singleton processing. + */ + virtual ClientPtr create(Singleton::Manager& singleton_manager, ConnectionOptionsPtr opts, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random) PURE; +}; + +/** + * A cache for host health check status. + */ + +class Cache { +public: + virtual ~Cache() = default; + + /** + * Add host health check status entry in the cache. + */ + virtual void add(const std::string& hostname) PURE; + + /** + * Remove host health check status entry in the cache. + */ + virtual void remove(const std::string& hostname) PURE; + + /** + * Get host health check status entry in the cache. + */ + virtual bool get(const std::string& hostname) PURE; + + /** + * Callback for remote cache set health check status. + */ + virtual void onSetCache(const std::string& hostname) PURE; + + /** + * Callback for get health check status from remote cache. 
+ */ + virtual void onGetCache(const std::string& hostname, const std::string& status) PURE; +}; + +using CachePtr = std::shared_ptr; + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/client_impl.cc b/source/extensions/health_checkers/cached/client_impl.cc new file mode 100644 index 000000000000..7e08ca3fda0a --- /dev/null +++ b/source/extensions/health_checkers/cached/client_impl.cc @@ -0,0 +1,37 @@ +#include "source/extensions/health_checkers/cached/client_impl.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +SINGLETON_MANAGER_REGISTRATION(cached_health_check_client); + +ClientImpl::ClientImpl(ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random) { + cache_ = std::make_shared(opts, dispatcher, random); +} + +void ClientImpl::start(const std::string& hostname) { cache_->add(hostname); } + +void ClientImpl::close(const std::string& hostname) { cache_->remove(hostname); } + +bool ClientImpl::sendRequest(const std::string& hostname) { return cache_->get(hostname); } + +ClientFactoryImpl ClientFactoryImpl::instance_; + +ClientPtr ClientFactoryImpl::create(Singleton::Manager& singleton_manager, + ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random) { + auto client = singleton_manager.getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(cached_health_check_client), + [&opts, &dispatcher, &random]() { + return std::make_shared(opts, dispatcher, random); + }); + return client; +} + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/client_impl.h b/source/extensions/health_checkers/cached/client_impl.h new file mode 100644 index 000000000000..b22cfb4091d1 --- /dev/null +++ b/source/extensions/health_checkers/cached/client_impl.h @@ -0,0 +1,36 @@ +#pragma once + +#include "source/extensions/health_checkers/cached/client.h" +#include "source/extensions/health_checkers/cached/hiredis.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +class ClientImpl : public Client, public Singleton::Instance { +public: + ClientImpl(ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random); + + void start(const std::string& hostname) override; + bool sendRequest(const std::string& hostname) override; + void close(const std::string& hostname) override; + +private: + CachePtr cache_; +}; + +class ClientFactoryImpl : public ClientFactory { +public: + // ClientFactory + ClientPtr create(Singleton::Manager& singleton_manager, ConnectionOptionsPtr opts, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random) override; + + static ClientFactoryImpl instance_; +}; + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/config.cc b/source/extensions/health_checkers/cached/config.cc new file mode 100644 index 000000000000..beeec94a96fc --- /dev/null +++ b/source/extensions/health_checkers/cached/config.cc @@ -0,0 +1,33 @@ +#include "source/extensions/health_checkers/cached/config.h" + +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/registry/registry.h" + +#include "source/common/config/utility.h" +#include 
"source/extensions/health_checkers/cached/client_impl.h" +#include "source/extensions/health_checkers/cached/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +Upstream::HealthCheckerSharedPtr CachedHealthCheckerFactory::createCustomHealthChecker( + const envoy::config::core::v3::HealthCheck& config, + Server::Configuration::HealthCheckerFactoryContext& context) { + return std::make_shared( + context.cluster(), config, + getCachedHealthCheckConfig(config, context.messageValidationVisitor()), + context.mainThreadDispatcher(), context.runtime(), context.eventLogger(), context.api(), + context.singletonManager(), ClientFactoryImpl::instance_); +}; + +/** + * Static registration for the cached custom health checker. @see RegisterFactory. + */ +REGISTER_FACTORY(CachedHealthCheckerFactory, Server::Configuration::CustomHealthCheckerFactory); + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/config.h b/source/extensions/health_checkers/cached/config.h new file mode 100644 index 000000000000..fa319743c275 --- /dev/null +++ b/source/extensions/health_checkers/cached/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/extensions/health_checkers/cached/v3/cached.pb.h" +#include "envoy/extensions/health_checkers/cached/v3/cached.pb.validate.h" +#include "envoy/server/health_checker_config.h" + +#include "source/extensions/health_checkers/cached/cached.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +/** + * Config registration for the cached health checker. 
+ */ +class CachedHealthCheckerFactory : public Server::Configuration::CustomHealthCheckerFactory { +public: + Upstream::HealthCheckerSharedPtr + createCustomHealthChecker(const envoy::config::core::v3::HealthCheck& config, + Server::Configuration::HealthCheckerFactoryContext& context) override; + + std::string name() const override { return "envoy.health_checkers.cached"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{new envoy::extensions::health_checkers::cached::v3::Cached()}; + } +}; + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/hiredis.cc b/source/extensions/health_checkers/cached/hiredis.cc new file mode 100644 index 000000000000..c316ed6c5829 --- /dev/null +++ b/source/extensions/health_checkers/cached/hiredis.cc @@ -0,0 +1,573 @@ +#include "source/extensions/health_checkers/cached/hiredis.h" + +#include "source/common/common/assert.h" +#include "source/common/event/dispatcher_impl.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +namespace { + +static inline bool isError(redisReply& reply) { return reply.type == REDIS_REPLY_ERROR; } + +static inline void throwError(const redisContext& ctx, const std::string& err_info) { + auto err_code = ctx.err; + const auto* err_str = ctx.errstr; + if (err_str == nullptr) { + throw EnvoyException(err_info + static_cast(": null error message: ") + + std::to_string(err_code)); + } else { + auto err_msg = err_info + static_cast(": ") + err_str; + throw EnvoyException(err_msg); + } +} + +static inline void throwError(const redisReply& reply) { + ASSERT(reply.type == REDIS_REPLY_ERROR); + + if (reply.str == nullptr) { + throw EnvoyException("Null error reply"); + } + + auto err_str = std::string(reply.str, reply.len); + + throw EnvoyException(err_str); +} + +static inline timeval toTimeval(const std::chrono::milliseconds& dur) { + auto sec = std::chrono::duration_cast(dur); + auto msec = std::chrono::duration_cast(dur - sec); + + timeval t; + t.tv_sec = sec.count(); + t.tv_usec = msec.count(); + return t; +} + +} // namespace + +Command::Command(char* data, int len) : data_(data), size_(len) { + if (data == nullptr || len < 0) { + throw EnvoyException("failed to format command"); + } +} + +Command::Command(Command&& that) noexcept { move(std::move(that)); } + +Command& Command::operator=(Command&& that) noexcept { + if (this != &that) { + move(std::move(that)); + } + + return *this; +} + +Command::~Command() noexcept { + if (data_ != nullptr) { + redisFreeCommand(data_); + } +} + +const char* Command::data() const noexcept { return data_; } + +int Command::size() const noexcept { return size_; } + +void Command::move(Command&& that) noexcept { + data_ = that.data_; + size_ = that.size_; + that.data_ = nullptr; + that.size_ = 0; +} + +template Command cmd(const char* format, Args&&... 
args) { + char* data = nullptr; + auto len = redisFormatCommand(&data, format, std::forward(args)...); + + return Command(data, len); +} + +template class Handler { +public: + Handler(Callback c) : c_(c) {} + + static void callback(redisAsyncContext* ctx, void* reply, void* privdata) { + (static_cast*>(privdata))->operator()(ctx, reply); + } + + void operator()(redisAsyncContext* ctx, void* reply) { + c_(static_cast(reply)); + redisContext* c = &(ctx->c); + if (c->flags & REDIS_SUBSCRIBED) + return; + delete (this); + } + +private: + Callback c_; +}; + +template class CommandEvent : public BaseEvent { +public: + explicit CommandEvent(Command cmd, Handler* cb) + : cmd_(std::move(cmd)), cb_(std::move(cb)) {} + + void handle(redisAsyncContext* ctx) override { + if (redisAsyncFormattedCommand(ctx, Handler::callback, cb_, cmd_.data(), + cmd_.size()) != REDIS_OK) { + throwError(ctx->c, "failed to send command"); + } + } + +private: + Command cmd_; + Handler* cb_; +}; + +template using CommandEventUPtr = std::unique_ptr>; + +Connection::Connection(ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher, CachePtr cache, + Random::RandomGenerator& random, bool subscribed) + : opts_(opts), dispatcher_(dispatcher), cache_(cache), subscribed_(subscribed), + base_(&static_cast(&dispatcher_)->base()), + event_callback_(dispatcher_.createSchedulableCallback([this]() { eventCallback(); })), + random_(random) { + event_callback_->scheduleCallbackNextIteration(); + backoff_strategy_ = std::make_unique( + RetryInitialDelayMs, RetryMaxDelayMs, random_); + retry_timer_ = dispatcher_.createTimer([this]() -> void { connect(); }); + + connect(); +} + +Connection::~Connection() { + if (event_callback_ != nullptr && event_callback_->enabled()) { + event_callback_->cancel(); + } + if (retry_timer_ && retry_timer_->enabled()) { + retry_timer_->disableTimer(); + } +} + +template void Connection::execCommand(Command cmd, Callback callback) { + auto event = CommandEventUPtr( + new CommandEvent(std::move(cmd), new Handler(callback))); + send(std::move(event)); +} + +void Connection::psubscribe(std::string pattern) { + auto lambdaOnPmsg = [&](redisReply* reply) { + if (reply && reply->elements == 4) { + if (reply->element[0] && reply->element[0]->str && + !strncasecmp(reply->element[0]->str, "pmessage", reply->element[0]->len) && + reply->element[3] && reply->element[3]->str) { + auto hostname = std::string(reply->element[3]->str); + if (cache_) + cache_->onSetCache(hostname); + } + } + }; + execCommand(cmd("PSUBSCRIBE %b", pattern.data(), pattern.size()), lambdaOnPmsg); +} + +void Connection::get(std::string hostname) { + auto lambdaOnResult = [&, hostname](redisReply* reply) { + if (reply && reply->str) { + auto status = std::string(reply->str); + if (cache_) + cache_->onGetCache(hostname, status); + }; + }; + execCommand(cmd("GET %b", hostname.data(), hostname.size()), lambdaOnResult); +} + +bool Connection::needAuth() const { return !opts_->password.empty() || opts_->user != "default"; } + +bool Connection::needSelectDb() const { return opts_->db != 0; } + +void Connection::connectingCallback() { + if (needAuth()) { + auth(); + } else if (needSelectDb()) { + selectDb(); + } else { + setConnected(); + } +} + +void Connection::authingCallback() { + if (needSelectDb()) { + selectDb(); + } else { + setConnected(); + } +} + +void Connection::selectDbCallback() { setConnected(); } + +void Connection::auth() { + ASSERT(!isDisconnected()); + + if (opts_->user == "default") { + if (redisAsyncCommand(ctx_, setOptionsCallback, 
nullptr, "AUTH %b", opts_->password.data(), + opts_->password.size()) != REDIS_OK) { + throw EnvoyException("failed to send auth command"); + } + } else { + // Redis 6.0 or latter + if (redisAsyncCommand(ctx_, setOptionsCallback, nullptr, "AUTH %b %b", opts_->user.data(), + opts_->user.size(), opts_->password.data(), + opts_->password.size()) != REDIS_OK) { + throw EnvoyException("failed to send auth command"); + } + } + + state_ = State::AUTHING; +} + +void Connection::selectDb() { + ASSERT(!isDisconnected()); + + if (redisAsyncCommand(ctx_, setOptionsCallback, nullptr, "SELECT %d", opts_->db) != REDIS_OK) { + throw EnvoyException("failed to send select command"); + } + + state_ = State::SELECTING_DB; +} + +void Connection::setConnected() { + state_ = State::CONNECTED; + + if (subscribed_) { + std::string pattern = absl::StrFormat(DEFAULT_PATTERN, opts_->db); + psubscribe(pattern); + } + + // Send pending commands. + send(); +} + +bool Connection::isDisconnected() const noexcept { return state_ == State::DISCONNECTED; } + +void Connection::fail(const EnvoyException* /*err*/) { + ctx_ = nullptr; + + state_ = State::DISCONNECTED; + + setRetryTimer(); +} + +void Connection::setOptionsCallback(redisAsyncContext* ctx, void* r, void*) { + ASSERT(ctx != nullptr); + + auto* connection = static_cast(ctx->data); + ASSERT(connection != nullptr); + + redisReply* reply = static_cast(r); + if (reply == nullptr) { + // Connection has bee closed. + return; + } + + try { + if (isError(*reply)) { + throwError(*reply); + } + + } catch (const EnvoyException& e) { + connection->disconnect(&e); + + return; + } + + connection->connectCallback(nullptr); +} + +void Connection::eventCallback() { + switch (state_.load()) { + case State::CONNECTED: + send(); + break; + + default: + break; + } + + if (event_callback_ != nullptr) { + event_callback_->scheduleCallbackNextIteration(); + } +} + +void Connection::connectCallback(const EnvoyException* err) { + if (err) { + // Failed to connect to Redis + fail(err); + + return; + } + + // Connect OK. + try { + switch (state_.load()) { + case State::CONNECTING: + connectingCallback(); + break; + + case State::AUTHING: + authingCallback(); + break; + + case State::SELECTING_DB: + selectDbCallback(); + break; + + default: + setConnected(); + } + } catch (const EnvoyException& e) { + disconnect(&e); + } +} + +void Connection::disconnect(const EnvoyException* err) { + if (ctx_ != nullptr) { + disableDisconnectCallback(); + redisAsyncDisconnect(ctx_); + } + + fail(err); +} + +void Connection::disableDisconnectCallback() { + ASSERT(ctx_ != nullptr); + + auto* connection = static_cast(ctx_->data); + + ASSERT(connection != nullptr); + + connection->run_disconnect_callback_ = false; +} + +void Connection::send(BaseEventUPtr event) { + { + Thread::LockGuard write_lock(events_lock_); + + events_.push_back(std::move(event)); + } +} + +void Connection::send() { + auto events = getEvents(); + for (auto idx = 0U; idx != events.size(); ++idx) { + auto& event = events[idx]; + try { + event->handle(ctx_); + } catch (const EnvoyException& e) { + // Failed to send command, fail subsequent events. + disconnect(&e); + + break; + } + } +} + +std::vector Connection::getEvents() { + std::vector events; + { + Thread::LockGuard write_lock(events_lock_); + + events.swap(events_); + } + + return events; +} + +CTlsContextPtr Connection::secureConnection(redisContext& ctx, const TlsOptions& opts) { + redisInitOpenSSL(); + auto c_str = [](const std::string& s) { return s.empty() ? 
nullptr : s.c_str(); }; + + redisSSLContextError err; + auto tls_ctx = + CTlsContextPtr(redisCreateSSLContext(c_str(opts.cacert), c_str(opts.capath), c_str(opts.cert), + c_str(opts.key), c_str(opts.sni), &err)); + if (!tls_ctx) { + throw EnvoyException(std::string("failed to create TLS context: ") + + redisSSLContextGetError(err)); + } + + if (redisInitiateSSLWithContext(&ctx, tls_ctx.get()) != REDIS_OK) { + throwError(ctx, "Failed to initialize TLS connection"); + } + + return tls_ctx; +} + +ConnectionOptions& Connection::options() { + { + Thread::LockGuard lock(opts_lock_); + return *opts_; + } +} + +void Connection::connect() { + if (!isDisconnected()) { + return; + } + try { + auto opts = options(); + + auto ctx = connect(opts); + + ASSERT(ctx && ctx->err == REDIS_OK); + + const auto& tls_opts = opts.tls; + CTlsContextPtr tls_ctx; + if (tls_opts.enabled) { + tls_ctx = secureConnection(ctx->c, tls_opts); + } + + watch(*ctx); + + tls_ctx_ = std::move(tls_ctx); + ctx_ = ctx.release(); + + state_ = State::CONNECTING; + } catch (const EnvoyException& e) { + fail(&e); + } +} + +void Connection::watch(redisAsyncContext& ctx) { + if (redisLibeventAttach(&ctx, base_) != REDIS_OK) { + throw EnvoyException("failed to attach to event loop"); + } + + redisAsyncSetConnectCallback(&ctx, connected); + redisAsyncSetDisconnectCallback(&ctx, disconnected); +} + +void Connection::connected(const redisAsyncContext* ctx, int status) { + ASSERT(ctx != nullptr); + + auto* connection = static_cast(ctx->data); + ASSERT(connection != nullptr); + + const EnvoyException* err = nullptr; + if (status != REDIS_OK) { + try { + throwError(ctx->c, "failed to connect to server"); + } catch (const EnvoyException& e) { + err = &e; + } + } + + connection->connectCallback(err); +} + +void Connection::disconnected(const redisAsyncContext* ctx, int status) { + ASSERT(ctx != nullptr); + + auto* connection = static_cast(ctx->data); + ASSERT(connection != nullptr); + + if (!connection->run_disconnect_callback_) { + return; + } + + const EnvoyException* err = nullptr; + if (status != REDIS_OK) { + try { + throwError(ctx->c, "failed to disconnect from server"); + } catch (const EnvoyException& e) { + err = &e; + } + } + + connection->disconnectCallback(err); +} + +void Connection::disconnectCallback(const EnvoyException* err) { fail(err); } + +CAsyncContextPtr Connection::connect(const ConnectionOptions& opts) { + redisOptions redis_opts; + std::memset(&redis_opts, 0, sizeof(redis_opts)); + + timeval connect_timeout; + if (opts.connect_timeout > std::chrono::milliseconds(0)) { + connect_timeout = toTimeval(opts.connect_timeout); + redis_opts.connect_timeout = &connect_timeout; + } + timeval command_timeout; + if (opts.command_timeout > std::chrono::milliseconds(0)) { + command_timeout = toTimeval(opts.command_timeout); + redis_opts.command_timeout = &command_timeout; + } + + redis_opts.type = REDIS_CONN_TCP; + redis_opts.endpoint.tcp.ip = opts.host.c_str(); + redis_opts.endpoint.tcp.port = opts.port; + + auto* context = redisAsyncConnectWithOptions(&redis_opts); + if (context == nullptr) { + throw EnvoyException("Failed to allocate memory for connection."); + } + + auto ctx = CAsyncContextPtr(context); + if (ctx->err != REDIS_OK) { + throwError(ctx->c, "failed to connect to server"); + } + + ctx->data = static_cast(this); + + return ctx; +} + +void Connection::setRetryTimer() { + retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs())); +} + +CacheImpl::CacheImpl(ConnectionOptionsPtr opts, 
Event::Dispatcher& dispatcher, + Random::RandomGenerator& random) + : get_connection_(opts, dispatcher, static_cast(this), random, false), + set_connection_(opts, dispatcher, static_cast(this), random, true) {} + +void CacheImpl::onGetCache(const std::string& hostname, const std::string& status) { + if (!host_status_map_.contains(hostname)) { + return; + } + try { + const std::string status_document_value = std::string(status); + Json::ObjectSharedPtr document_json; + document_json = Json::Factory::loadFromString(status_document_value); + const bool is_healthy = document_json->getBoolean(IS_HEALTHY); + host_status_map_.insert_or_assign(hostname, is_healthy); + } catch (EnvoyException& e) { + ENVOY_LOG(error, + "CachedHealthChecker Cache onGetCache. Could not parse health status for host " + "'{}': {}", + hostname, e.what()); + } +} + +void CacheImpl::add(const std::string& hostname) { + if (!host_status_map_.contains(hostname)) { + host_status_map_.emplace(hostname, false); + } + getCache(hostname); +} + +bool CacheImpl::get(const std::string& hostname) { + auto itr = host_status_map_.find(hostname); + if (itr == host_status_map_.end()) { + host_status_map_.emplace(hostname, false); + getCache(hostname); + return false; + } + return itr->second; +} + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/hiredis.h b/source/extensions/health_checkers/cached/hiredis.h new file mode 100644 index 000000000000..f4fdac15c3b7 --- /dev/null +++ b/source/extensions/health_checkers/cached/hiredis.h @@ -0,0 +1,209 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include "envoy/common/exception.h" +#include "envoy/common/random_generator.h" + +#include "source/common/common/backoff_strategy.h" +#include "source/common/common/c_smart_ptr.h" +#include "source/common/common/thread.h" +#include "source/common/config/utility.h" +#include "source/common/json/json_loader.h" +#include "source/extensions/health_checkers/cached/client.h" + +#include "absl/container/node_hash_map.h" +#include "absl/strings/str_format.h" + +#ifdef _MSC_VER + +#ifndef strncasecmp +#define strncasecmp strnicmp +#endif + +#endif + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +namespace { + +constexpr char IS_HEALTHY[] = "IsHealthy"; + +constexpr char DEFAULT_PATTERN[] = "__keyevent@%u__:set"; + +constexpr uint32_t RetryInitialDelayMs = 257; + +constexpr uint32_t RetryMaxDelayMs = 32257; + +} // namespace + +class Command { +public: + Command(char* data, int len); + + Command(const Command&) = delete; + Command& operator=(const Command&) = delete; + + Command(Command&& that) noexcept; + + Command& operator=(Command&& that) noexcept; + + ~Command() noexcept; + + const char* data() const noexcept; + + int size() const noexcept; + +private: + void move(Command&& that) noexcept; + + char* data_ = nullptr; + int size_ = 0; +}; + +class BaseEvent { +public: + virtual ~BaseEvent() = default; + + virtual void handle(redisAsyncContext* ctx) = 0; +}; + +using BaseEventUPtr = std::unique_ptr; + +using CTlsContextPtr = CSmartPtr; + +using CAsyncContextPtr = CSmartPtr; + +class Connection : public Logger::Loggable { +public: + Connection(ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher, CachePtr cache, + Random::RandomGenerator& random, bool subscribed); + + ~Connection(); + + template void execCommand(Command cmd, Callback callback); + + 
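+  // psubscribe() watches keyspace set events (the "__keyevent@<db>__:set" pattern) so the cache
+  // learns when a third-party checker updates a host; get() fetches that host's JSON status document.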
void psubscribe(std::string pattern); + + void get(std::string hostname); + +private: + bool needAuth() const; + + bool needSelectDb() const; + + void connectingCallback(); + + void authingCallback(); + + void selectDbCallback(); + + void auth(); + + void selectDb(); + + void setConnected(); + + bool isDisconnected() const noexcept; + + void fail(const EnvoyException* /*err*/); + + static void setOptionsCallback(redisAsyncContext* ctx, void* r, void*); + + void eventCallback(); + + void connectCallback(const EnvoyException* err); + + void disconnect(const EnvoyException* err); + + void disableDisconnectCallback(); + + void send(BaseEventUPtr event); + + void send(); + + std::vector getEvents(); + + CTlsContextPtr secureConnection(redisContext& ctx, const TlsOptions& opts); + + ConnectionOptions& options(); + + void connect(); + + void watch(redisAsyncContext& ctx); + + static void connected(const redisAsyncContext* ctx, int status); + + static void disconnected(const redisAsyncContext* ctx, int status); + + void disconnectCallback(const EnvoyException* err); + + CAsyncContextPtr connect(const ConnectionOptions& opts); + + void setRetryTimer(); + + enum class State { + DISCONNECTED = 0, + CONNECTING, + AUTHING, + SELECTING_DB, + CONNECTED, + }; + + std::atomic state_{State::DISCONNECTED}; + + ConnectionOptionsPtr opts_; + Event::Dispatcher& dispatcher_; + CachePtr cache_; + bool subscribed_ = false; + struct event_base* base_; + Event::SchedulableCallbackPtr event_callback_; + + CTlsContextPtr tls_ctx_; + redisAsyncContext* ctx_ = nullptr; + std::vector> events_; + Thread::MutexBasicLockable events_lock_; + Thread::MutexBasicLockable opts_lock_; + bool run_disconnect_callback_ = true; + + BackOffStrategyPtr backoff_strategy_; + Event::TimerPtr retry_timer_; + Random::RandomGenerator& random_; +}; + +class CacheImpl : public Cache, public Logger::Loggable { +public: + CacheImpl(ConnectionOptionsPtr opts, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random); + + void onSetCache(const std::string& hostname) override { getCache(hostname); } + + void onGetCache(const std::string& hostname, const std::string& status) override; + + void add(const std::string& hostname) override; + + void remove(const std::string& hostname) override { host_status_map_.erase(hostname); } + + bool get(const std::string& hostname) override; + +private: + void getCache(const std::string& hostname) { get_connection_.get(hostname); } + + Connection get_connection_; + Connection set_connection_; + + absl::node_hash_map host_status_map_; +}; + +} // namespace CachedHealthChecker +} // namespace HealthCheckers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/cached/utility.h b/source/extensions/health_checkers/cached/utility.h new file mode 100644 index 000000000000..1e1829ec759e --- /dev/null +++ b/source/extensions/health_checkers/cached/utility.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/extensions/health_checkers/cached/v3/cached.pb.h" +#include "envoy/extensions/health_checkers/cached/v3/cached.pb.validate.h" + +#include "source/common/config/utility.h" +#include "source/common/protobuf/protobuf.h" +#include "source/common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HealthCheckers { +namespace CachedHealthChecker { + +static const envoy::extensions::health_checkers::cached::v3::Cached +getCachedHealthCheckConfig(const envoy::config::core::v3::HealthCheck& 
diff --git a/source/extensions/health_checkers/cached/utility.h b/source/extensions/health_checkers/cached/utility.h
new file mode 100644
index 000000000000..1e1829ec759e
--- /dev/null
+++ b/source/extensions/health_checkers/cached/utility.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include "envoy/config/core/v3/health_check.pb.h"
+#include "envoy/extensions/health_checkers/cached/v3/cached.pb.h"
+#include "envoy/extensions/health_checkers/cached/v3/cached.pb.validate.h"
+
+#include "source/common/config/utility.h"
+#include "source/common/protobuf/protobuf.h"
+#include "source/common/protobuf/utility.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace HealthCheckers {
+namespace CachedHealthChecker {
+
+static const envoy::extensions::health_checkers::cached::v3::Cached
+getCachedHealthCheckConfig(const envoy::config::core::v3::HealthCheck& health_check_config,
+                           ProtobufMessage::ValidationVisitor& validation_visitor) {
+  ProtobufTypes::MessagePtr config =
+      ProtobufTypes::MessagePtr{new envoy::extensions::health_checkers::cached::v3::Cached()};
+  Envoy::Config::Utility::translateOpaqueConfig(
+      health_check_config.custom_health_check().typed_config(), validation_visitor, *config);
+  return MessageUtil::downcastAndValidate<
+      const envoy::extensions::health_checkers::cached::v3::Cached&>(*config, validation_visitor);
+}
+
+} // namespace CachedHealthChecker
+} // namespace HealthCheckers
+} // namespace Extensions
+} // namespace Envoy
diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc
index 860f1c2d601e..24eacba6598b 100644
--- a/test/common/upstream/health_checker_impl_test.cc
+++ b/test/common/upstream/health_checker_impl_test.cc
@@ -76,10 +76,17 @@ TEST(HealthCheckerFactoryTest, GrpcHealthCheckHTTP2NotConfiguredException) {
   AccessLog::MockAccessLogManager log_manager;
   NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;
   Api::MockApi api;
+  class FakeSingletonManager : public Singleton::Manager {
+  public:
+    Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {
+      return nullptr;
+    }
+  };
+  FakeSingletonManager fsm;
 
   EXPECT_THROW_WITH_MESSAGE(
       HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime, dispatcher,
-                                   log_manager, validation_visitor, api),
+                                   log_manager, validation_visitor, api, fsm),
       EnvoyException, "fake_cluster cluster must support HTTP/2 for gRPC healthchecking");
 }
 
@@ -94,11 +101,18 @@ TEST(HealthCheckerFactoryTest, CreateGrpc) {
   AccessLog::MockAccessLogManager log_manager;
   NiceMock<ProtobufMessage::MockValidationVisitor> validation_visitor;
   NiceMock<Api::MockApi> api;
+  class FakeSingletonManager : public Singleton::Manager {
+  public:
+    Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {
+      return nullptr;
+    }
+  };
+  FakeSingletonManager fsm;
 
   EXPECT_NE(nullptr,
             dynamic_cast<GrpcHealthCheckerImpl*>(
                 HealthCheckerFactory::create(createGrpcHealthCheckConfig(), cluster, runtime,
-                                             dispatcher, log_manager, validation_visitor, api)
+                                             dispatcher, log_manager, validation_visitor, api, fsm)
                     .get()));
 }
 
diff --git a/test/extensions/health_checkers/cached/BUILD b/test/extensions/health_checkers/cached/BUILD
new file mode 100644
index 000000000000..6e2092b01060
--- /dev/null
+++ b/test/extensions/health_checkers/cached/BUILD
@@ -0,0 +1,30 @@
+load(
+    "//bazel:envoy_build_system.bzl",
+    "envoy_package",
+)
+load(
+    "//test/extensions:extensions_build_system.bzl",
+    "envoy_extension_cc_test",
+)
+
+licenses(["notice"])  # Apache 2
+
+envoy_package()
+
+envoy_extension_cc_test(
+    name = "config_test",
+    srcs = ["config_test.cc"],
+    extension_names = ["envoy.health_checkers.cached"],
+    deps = [
+        "//source/common/upstream:health_checker_lib",
+        "//source/extensions/health_checkers/cached:config",
+        "//test/common/upstream:utility_lib",
+        "//test/mocks/access_log:access_log_mocks",
+        "//test/mocks/network:network_mocks",
+        "//test/mocks/runtime:runtime_mocks",
+        "//test/mocks/server:health_checker_factory_context_mocks",
+        "//test/mocks/upstream:health_checker_mocks",
+        "//test/mocks/upstream:priority_set_mocks",
+        "//test/test_common:test_runtime_lib",
+    ],
+)
diff --git a/test/extensions/health_checkers/cached/config_test.cc b/test/extensions/health_checkers/cached/config_test.cc
new file mode 100644
index 000000000000..01b47b48f56f
--- /dev/null
+++ b/test/extensions/health_checkers/cached/config_test.cc
@@ -0,0 +1,140 @@
+#include
+
+#include "source/common/upstream/health_checker_impl.h"
+#include "source/extensions/health_checkers/cached/config.h"
+
+#include "test/common/upstream/utility.h"
+#include "test/mocks/access_log/mocks.h"
+#include "test/mocks/network/mocks.h"
+#include "test/mocks/runtime/mocks.h"
+#include "test/mocks/server/health_checker_factory_context.h"
+#include "test/mocks/upstream/health_checker.h"
+#include "test/mocks/upstream/priority_set.h"
+#include "test/test_common/test_runtime.h"
+
+namespace Envoy {
+namespace Extensions {
+namespace HealthCheckers {
+namespace CachedHealthChecker {
+namespace {
+
+using CustomCachedHealthChecker =
+    Extensions::HealthCheckers::CachedHealthChecker::CachedHealthChecker;
+
+TEST(HealthCheckerFactoryTest, CreateCached) {
+  const std::string yaml = R"EOF(
+    timeout: 1s
+    interval: 1s
+    no_traffic_interval: 5s
+    interval_jitter: 1s
+    unhealthy_threshold: 1
+    healthy_threshold: 1
+    custom_health_check:
+      name: cached
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.health_checkers.cached.v3.Cached
+        host: localhost
+        port: 6400
+        password: foobared
+        db: 100
+        tls_options:
+          enabled: true
+          cacert: /etc/redis/ca.crt
+          cert: /etc/redis/client.crt
+          key: /etc/redis/client.key
+    )EOF";
+
+  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;
+
+  CachedHealthCheckerFactory factory;
+  EXPECT_NE(
+      nullptr,
+      dynamic_cast<CustomCachedHealthChecker*>(
+          factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context)
+              .get()));
+}
+
+TEST(HealthCheckerFactoryTest, CreateCachedWithLogHCFailure) {
+  const std::string yaml = R"EOF(
+    timeout: 1s
+    interval: 1s
+    no_traffic_interval: 5s
+    interval_jitter: 1s
+    unhealthy_threshold: 1
+    healthy_threshold: 1
+    custom_health_check:
+      name: cached
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.health_checkers.cached.v3.Cached
+        host: localhost
+        port: 6400
+        password: foobared
+        db: 100
+        tls_options:
+          enabled: true
+          cacert: /etc/redis/ca.crt
+          cert: /etc/redis/client.crt
+          key: /etc/redis/client.key
+    always_log_health_check_failures: true
+    )EOF";
+
+  NiceMock<Server::Configuration::MockHealthCheckerFactoryContext> context;
+
+  CachedHealthCheckerFactory factory;
+  EXPECT_NE(
+      nullptr,
+      dynamic_cast<CustomCachedHealthChecker*>(
+          factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context)
+              .get()));
+}
+
+TEST(HealthCheckerFactoryTest, CreateCachedViaUpstreamHealthCheckerFactory) {
+  const std::string yaml = R"EOF(
+    timeout: 1s
+    interval: 1s
+    no_traffic_interval: 5s
+    interval_jitter: 1s
+    unhealthy_threshold: 1
+    healthy_threshold: 1
+    custom_health_check:
+      name: cached
+      typed_config:
+        "@type": type.googleapis.com/envoy.extensions.health_checkers.cached.v3.Cached
+        host: localhost
+        port: 6400
+        password: foobared
+        db: 100
+        tls_options:
+          enabled: true
+          cacert: /etc/redis/ca.crt
+          cert: /etc/redis/client.crt
+          key: /etc/redis/client.key
+    )EOF";
+
+  NiceMock<Upstream::MockClusterMockPrioritySet> cluster;
+  Runtime::MockLoader runtime;
+  Random::MockRandomGenerator random;
+  Event::MockDispatcher dispatcher;
+  AccessLog::MockAccessLogManager log_manager;
+  NiceMock<Api::MockApi> api;
+  class FakeSingletonManager : public Singleton::Manager {
+  public:
+    Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {
+      return nullptr;
+    }
+  };
+  FakeSingletonManager fsm;
+
+  EXPECT_NE(nullptr,
+            dynamic_cast<CustomCachedHealthChecker*>(
+                Upstream::HealthCheckerFactory::create(
+                    Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, dispatcher,
+                    log_manager, ProtobufMessage::getStrictValidationVisitor(), api, fsm)
+                    .get()));
+}
+
+} // namespace
+} // namespace CachedHealthChecker
+} // namespace HealthCheckers
+} // namespace Extensions
+} // namespace Envoy
diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc
index 644bf0538e26..0f3f062cc272 100644
--- a/test/extensions/health_checkers/redis/config_test.cc
+++ b/test/extensions/health_checkers/redis/config_test.cc
@@ -116,12 +116,19 @@ TEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) {
   Event::MockDispatcher dispatcher;
   AccessLog::MockAccessLogManager log_manager;
   NiceMock<Api::MockApi> api;
+  class FakeSingletonManager : public Singleton::Manager {
+  public:
+    Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {
+      return nullptr;
+    }
+  };
+  FakeSingletonManager fsm;
 
   EXPECT_NE(nullptr,
             dynamic_cast<CustomRedisHealthChecker*>(
                 Upstream::HealthCheckerFactory::create(
                     Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, dispatcher,
-                    log_manager, ProtobufMessage::getStrictValidationVisitor(), api)
+                    log_manager, ProtobufMessage::getStrictValidationVisitor(), api, fsm)
                     .get()));
 }
 } // namespace
diff --git a/test/extensions/health_checkers/thrift/config_test.cc b/test/extensions/health_checkers/thrift/config_test.cc
index 601ec2709e80..15da627e8d02 100644
--- a/test/extensions/health_checkers/thrift/config_test.cc
+++ b/test/extensions/health_checkers/thrift/config_test.cc
@@ -190,12 +190,19 @@ TEST(HealthCheckerFactoryTest, CreateThriftViaUpstreamHealthCheckerFactory) {
   Event::MockDispatcher dispatcher;
   AccessLog::MockAccessLogManager log_manager;
   NiceMock<Api::MockApi> api;
+  class FakeSingletonManager : public Singleton::Manager {
+  public:
+    Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {
+      return nullptr;
+    }
+  };
+  FakeSingletonManager fsm;
 
   EXPECT_NE(nullptr,
             dynamic_cast<CustomThriftHealthChecker*>(
                 Upstream::HealthCheckerFactory::create(
                     Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, dispatcher,
-                    log_manager, ProtobufMessage::getStrictValidationVisitor(), api)
+                    log_manager, ProtobufMessage::getStrictValidationVisitor(), api, fsm)
                     .get()));
 }
 
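Each factory test above stubs `Singleton::Manager` with a `FakeSingletonManager` whose `get()` returns `nullptr`; that is evidently sufficient for these construction-only tests, presumably because nothing in them depends on an actual shared instance, and the same stub is baked into `MockHealthCheckerFactoryContext` in the mock changes below. For contrast, a real name-keyed manager creates an instance on the first request and hands the same one back afterwards, roughly as sketched here (a simplified illustration, not Envoy's `Singleton::ManagerImpl`, which also holds instances weakly and checks that names were registered; the singleton name in the example is made up):

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Toy name-keyed singleton registry: the first get() for a name invokes the
// factory, later calls return the cached instance.
class ToySingletonManager {
public:
  using Instance = std::shared_ptr<void>;
  using FactoryCb = std::function<Instance()>;

  Instance get(const std::string& name, FactoryCb factory) {
    auto it = singletons_.find(name);
    if (it != singletons_.end()) {
      return it->second;
    }
    Instance created = factory();
    singletons_.emplace(name, created);
    return created;
  }

private:
  std::unordered_map<std::string, Instance> singletons_;
};

int main() {
  ToySingletonManager manager;
  auto a = manager.get("example_shared_cache", [] {
    std::cout << "creating shared cache\n";
    return std::make_shared<int>(42);
  });
  auto b = manager.get("example_shared_cache", [] {
    std::cout << "never called\n";
    return std::make_shared<int>(0);
  });
  std::cout << (a == b) << "\n"; // 1: both callers share one instance
}
```

Sharing through the singleton manager is presumably what would let several cached health checkers reuse a single pair of connections to the cache server rather than each opening its own.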
diff --git a/test/mocks/server/health_checker_factory_context.cc b/test/mocks/server/health_checker_factory_context.cc
index 45d2de4315a1..dcb2abbc44e9 100644
--- a/test/mocks/server/health_checker_factory_context.cc
+++ b/test/mocks/server/health_checker_factory_context.cc
@@ -19,6 +19,7 @@ MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() {
   ON_CALL(*this, messageValidationVisitor())
       .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor()));
   ON_CALL(*this, api()).WillByDefault(ReturnRef(api_));
+  ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(manager_));
 }
 
 MockHealthCheckerFactoryContext::~MockHealthCheckerFactoryContext() = default;
diff --git a/test/mocks/server/health_checker_factory_context.h b/test/mocks/server/health_checker_factory_context.h
index d4bc31b438d2..f248ce89d849 100644
--- a/test/mocks/server/health_checker_factory_context.h
+++ b/test/mocks/server/health_checker_factory_context.h
@@ -29,6 +29,7 @@ class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryConte
   MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ());
   MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ());
   MOCK_METHOD(Api::Api&, api, ());
+  MOCK_METHOD(Singleton::Manager&, singletonManager, ());
   Upstream::HealthCheckEventLoggerPtr eventLogger() override {
     if (!event_logger_) {
       event_logger_ = std::make_unique<testing::NiceMock<Upstream::MockHealthCheckEventLogger>>();
@@ -36,6 +37,14 @@ class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryConte
     return std::move(event_logger_);
   }
 
+  class FakeSingletonManager : public Singleton::Manager {
+  public:
+    Singleton::InstanceSharedPtr get(const std::string&, Singleton::SingletonFactoryCb) override {
+      return nullptr;
+    }
+  };
+  FakeSingletonManager manager_;
+
   testing::NiceMock<Upstream::MockClusterMockPrioritySet> cluster_;
   testing::NiceMock<Event::MockDispatcher> dispatcher_;
   testing::NiceMock<Random::MockRandomGenerator> random_;
diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt
index 35e9baab1e75..1d1045d67324 100644
--- a/tools/spelling/spelling_dictionary.txt
+++ b/tools/spelling/spelling_dictionary.txt
@@ -556,6 +556,7 @@ canonicalize
 canonicalized
 canonicalizer
 canonicalizing
+capath
 cardinality
 casted
 cfg
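Finally, for readers who just want to try the extension: the configurations exercised in the tests earlier in this change always enable TLS, authentication and a non-default database. A bare-bones configuration would presumably reduce to the snippet below, written as a raw-string literal in the same style as those tests; the plain-TCP, no-auth combination is an assumption rather than a case this PR covers, and port 6379 is simply the conventional Redis port, not a value taken from the tests.

```cpp
#include <iostream>
#include <string>

// Hypothetical minimal cached health check configuration (untested sketch).
const std::string kMinimalCachedHealthCheckYaml = R"EOF(
timeout: 1s
interval: 1s
unhealthy_threshold: 1
healthy_threshold: 1
custom_health_check:
  name: cached
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.health_checkers.cached.v3.Cached
    host: localhost
    port: 6379
)EOF";

int main() { std::cout << kMinimalCachedHealthCheckYaml; }
```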